diff --git a/config/darwin-config.py b/config/darwin-config.py
index 049fe62f3b0..a2ce9c3331a 100644
--- a/config/darwin-config.py
+++ b/config/darwin-config.py
@@ -47,18 +47,21 @@ if MACOSX_ARCHITECTURE == 'ppc':
# CC = 'gcc-3.3'
# CXX = 'g++-3.3'
MAC_MIN_VERS = '10.4'
+ MACOSX_DEPLOYMENT_TARGET = '10.4'
MACOSX_SDK='/Developer/SDKs/MacOSX10.4u.sdk'
LCGDIR = '#../lib/darwin-8.0.0-powerpc'
CC = 'gcc-4.0'
CXX = 'g++-4.0'
elif MACOSX_ARCHITECTURE == 'i386':
MAC_MIN_VERS = '10.4'
+ MACOSX_DEPLOYMENT_TARGET = '10.4'
MACOSX_SDK='/Developer/SDKs/MacOSX10.4u.sdk'
LCGDIR = '#../lib/darwin-8.x.i386'
CC = 'gcc-4.0'
CXX = 'g++-4.0'
else :
MAC_MIN_VERS = '10.5'
+ MACOSX_DEPLOYMENT_TARGET = '10.5'
MACOSX_SDK='/Developer/SDKs/MacOSX10.5.sdk'
LCGDIR = '#../lib/darwin-9.x.universal'
CC = 'gcc-4.2'
diff --git a/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.cpp b/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.cpp
index 4f81b0953a2..61aac5b4ce5 100644
--- a/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.cpp
+++ b/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.cpp
@@ -793,7 +793,7 @@ btSoftBody* btSoftBodyHelpers::CreateEllipsoid(btSoftBodyWorldInfo& worldInfo,c
//
btSoftBody* btSoftBodyHelpers::CreateFromTriMesh(btSoftBodyWorldInfo& worldInfo,const btScalar* vertices,
const int* triangles,
- int ntriangles)
+ int ntriangles, bool randomizeConstraints)
{
int maxidx=0;
int i,j,ni;
@@ -828,14 +828,16 @@ btSoftBody* btSoftBodyHelpers::CreateFromTriMesh(btSoftBodyWorldInfo& worldInfo
#undef IDX
psb->appendFace(idx[0],idx[1],idx[2]);
}
- // don't randomize now, let's give a chance to the application to set face data
- //psb->randomizeConstraints();
+ if (randomizeConstraints)
+ {
+ psb->randomizeConstraints();
+ }
return(psb);
}
//
btSoftBody* btSoftBodyHelpers::CreateFromConvexHull(btSoftBodyWorldInfo& worldInfo, const btVector3* vertices,
- int nvertices)
+ int nvertices, bool randomizeConstraints)
{
HullDesc hdsc(QF_TRIANGLES,nvertices,vertices);
HullResult hres;
@@ -855,6 +857,9 @@ btSoftBody* btSoftBodyHelpers::CreateFromConvexHull(btSoftBodyWorldInfo& worldI
psb->appendFace(idx[0],idx[1],idx[2]);
}
hlib.ReleaseResult(hres);
- psb->randomizeConstraints();
+ if (randomizeConstraints)
+ {
+ psb->randomizeConstraints();
+ }
return(psb);
}
diff --git a/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.h b/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.h
index 0e3b50397ee..5eb2ebc0735 100644
--- a/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.h
+++ b/extern/bullet2/src/BulletSoftBody/btSoftBodyHelpers.h
@@ -109,11 +109,13 @@ struct btSoftBodyHelpers
static btSoftBody* CreateFromTriMesh( btSoftBodyWorldInfo& worldInfo,
const btScalar* vertices,
const int* triangles,
- int ntriangles);
+ int ntriangles,
+ bool randomizeConstraints = true);
/* Create from convex-hull */
static btSoftBody* CreateFromConvexHull( btSoftBodyWorldInfo& worldInfo,
const btVector3* vertices,
- int nvertices);
+ int nvertices,
+ bool randomizeConstraints = true);
};
#endif //SOFT_BODY_HELPERS_H
diff --git a/intern/audaspace/CMakeLists.txt b/intern/audaspace/CMakeLists.txt
index 587ef30979b..0965a467201 100644
--- a/intern/audaspace/CMakeLists.txt
+++ b/intern/audaspace/CMakeLists.txt
@@ -40,11 +40,6 @@ IF(WITH_OPENAL)
SET(INC ${INC} OpenAL ${OPENAL_INCLUDE_DIR})
FILE(GLOB OPENALSRC OpenAL/*.cpp)
ADD_DEFINITIONS(-DWITH_OPENAL)
-
- STRING(REGEX MATCH ".*ramework.*" FRAMEWORK ${OPENAL_INCLUDE_DIR})
- IF(FRAMEWORK)
- ADD_DEFINITIONS(-DAPPLE_FRAMEWORK_FIX)
- ENDIF(FRAMEWORK)
ENDIF(WITH_OPENAL)
IF(WITH_JACK)
@@ -59,6 +54,12 @@ IF(WITH_SNDFILE)
ADD_DEFINITIONS(-DWITH_SNDFILE)
ENDIF(WITH_SNDFILE)
-SET(SRC ${SRC} ${FFMPEGSRC} ${SNDFILESRC} ${SDLSRC} ${OPENALSRC} ${JACKSRC})
+IF(WITH_FFTW3)
+ SET(INC ${INC} fftw ${FFTW3_INC})
+ FILE(GLOB FFTW3SRC fftw/*.cpp)
+ ADD_DEFINITIONS(-DWITH_FFTW3)
+ENDIF(WITH_FFTW3)
+
+SET(SRC ${SRC} ${FFMPEGSRC} ${SNDFILESRC} ${FFTW3SRC} ${SDLSRC} ${OPENALSRC} ${JACKSRC})
BLENDERLIB(bf_audaspace "${SRC}" "${INC}")
diff --git a/intern/audaspace/FX/AUD_LoopFactory.h b/intern/audaspace/FX/AUD_LoopFactory.h
index 461d8c9ab69..c81d906b82e 100644
--- a/intern/audaspace/FX/AUD_LoopFactory.h
+++ b/intern/audaspace/FX/AUD_LoopFactory.h
@@ -38,7 +38,7 @@ private:
/**
* The loop count.
*/
- float m_loop;
+ int m_loop;
public:
/**
diff --git a/intern/audaspace/FX/AUD_RectifyFactory.cpp b/intern/audaspace/FX/AUD_RectifyFactory.cpp
new file mode 100644
index 00000000000..f97defd793b
--- /dev/null
+++ b/intern/audaspace/FX/AUD_RectifyFactory.cpp
@@ -0,0 +1,45 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#include "AUD_RectifyFactory.h"
+#include "AUD_RectifyReader.h"
+
+AUD_RectifyFactory::AUD_RectifyFactory(AUD_IFactory* factory) :
+ AUD_EffectFactory(factory) {}
+
+AUD_RectifyFactory::AUD_RectifyFactory() :
+ AUD_EffectFactory(0) {}
+
+AUD_IReader* AUD_RectifyFactory::createReader()
+{
+ AUD_IReader* reader = getReader();
+
+ if(reader != 0)
+ {
+ reader = new AUD_RectifyReader(reader); AUD_NEW("reader")
+ }
+
+ return reader;
+}
diff --git a/intern/audaspace/FX/AUD_RectifyFactory.h b/intern/audaspace/FX/AUD_RectifyFactory.h
new file mode 100644
index 00000000000..ed00620d318
--- /dev/null
+++ b/intern/audaspace/FX/AUD_RectifyFactory.h
@@ -0,0 +1,51 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#ifndef AUD_RECTIFYFACTORY
+#define AUD_RECTIFYFACTORY
+
+#include "AUD_EffectFactory.h"
+
+/**
+ * This factory rectifies another factory.
+ */
+class AUD_RectifyFactory : public AUD_EffectFactory
+{
+public:
+ /**
+ * Creates a new rectify factory.
+ * \param factory The input factory.
+ */
+ AUD_RectifyFactory(AUD_IFactory* factory = 0);
+
+ /**
+ * Creates a new rectify factory.
+ */
+ AUD_RectifyFactory();
+
+ virtual AUD_IReader* createReader();
+};
+
+#endif //AUD_RECTIFYFACTORY
diff --git a/intern/audaspace/FX/AUD_RectifyReader.cpp b/intern/audaspace/FX/AUD_RectifyReader.cpp
new file mode 100644
index 00000000000..9839aefa838
--- /dev/null
+++ b/intern/audaspace/FX/AUD_RectifyReader.cpp
@@ -0,0 +1,82 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#include "AUD_RectifyReader.h"
+#include "AUD_Buffer.h"
+
+#include <cstring>
+
+AUD_RectifyReader::AUD_RectifyReader(AUD_IReader* reader) :
+ AUD_EffectReader(reader)
+{
+ int bigendian = 1;
+ bigendian = (((char*)&bigendian)[0]) ? 0: 1; // 1 if Big Endian
+
+ switch(m_reader->getSpecs().format)
+ {
+ case AUD_FORMAT_S16:
+		m_rectify = AUD_rectify<int16_t>;
+ break;
+ case AUD_FORMAT_S32:
+		m_rectify = AUD_rectify<int32_t>;
+ break;
+ case AUD_FORMAT_FLOAT32:
+		m_rectify = AUD_rectify<float>;
+ break;
+ case AUD_FORMAT_FLOAT64:
+		m_rectify = AUD_rectify<double>;
+ break;
+ case AUD_FORMAT_U8:
+ m_rectify = AUD_rectify_u8;
+ break;
+ case AUD_FORMAT_S24:
+ m_rectify = bigendian ? AUD_rectify_s24_be : AUD_rectify_s24_le;
+ break;
+ default:
+ delete m_reader;
+ AUD_THROW(AUD_ERROR_READER);
+ }
+
+ m_buffer = new AUD_Buffer(); AUD_NEW("buffer")
+}
+
+AUD_RectifyReader::~AUD_RectifyReader()
+{
+ delete m_buffer; AUD_DELETE("buffer")
+}
+
+void AUD_RectifyReader::read(int & length, sample_t* & buffer)
+{
+ sample_t* buf;
+ AUD_Specs specs = m_reader->getSpecs();
+
+ m_reader->read(length, buf);
+ if(m_buffer->getSize() < length*AUD_SAMPLE_SIZE(specs))
+ m_buffer->resize(length*AUD_SAMPLE_SIZE(specs));
+
+ buffer = m_buffer->getBuffer();
+
+ m_rectify(buffer, buf, length * specs.channels);
+}
diff --git a/intern/audaspace/FX/AUD_RectifyReader.h b/intern/audaspace/FX/AUD_RectifyReader.h
new file mode 100644
index 00000000000..17423811cdc
--- /dev/null
+++ b/intern/audaspace/FX/AUD_RectifyReader.h
@@ -0,0 +1,65 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#ifndef AUD_RECTIFYREADER
+#define AUD_RECTIFYREADER
+
+#include "AUD_EffectReader.h"
+#include "AUD_ConverterFunctions.h"
+class AUD_Buffer;
+
+/**
+ * This class reads another reader and rectifies it.
+ */
+class AUD_RectifyReader : public AUD_EffectReader
+{
+private:
+ /**
+ * The playback buffer.
+ */
+ AUD_Buffer *m_buffer;
+
+ /**
+ * Rectifying function.
+ */
+ AUD_rectify_f m_rectify;
+
+public:
+ /**
+ * Creates a new rectify reader.
+ * \param reader The reader to read from.
+ * \exception AUD_Exception Thrown if the reader specified is NULL.
+ */
+ AUD_RectifyReader(AUD_IReader* reader);
+
+ /**
+ * Destroys the reader.
+ */
+ virtual ~AUD_RectifyReader();
+
+ virtual void read(int & length, sample_t* & buffer);
+};
+
+#endif //AUD_RECTIFYREADER
diff --git a/intern/audaspace/Makefile b/intern/audaspace/Makefile
index 474f53f0e0f..4cd15e5d9fb 100644
--- a/intern/audaspace/Makefile
+++ b/intern/audaspace/Makefile
@@ -56,6 +56,10 @@ ifeq ($(WITH_SNDFILE),true)
DIRS += sndfile
endif
+ifeq ($(WITH_FFTW3),true)
+ DIRS += fftw
+endif
+
include nan_subdirs.mk
install: $(ALL_OR_DEBUG)
@@ -80,6 +84,10 @@ ifeq ($(WITH_SNDFILE),true)
@../tools/cpifdiff.sh $(DIR)/$(DEBUG_DIR)libaud_sndfile.a $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)
endif
+ifeq ($(WITH_FFTW3),true)
+ @../tools/cpifdiff.sh $(DIR)/$(DEBUG_DIR)libaud_fftw.a $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)
+endif
+
ifeq ($(OS),darwin)
ranlib $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)libaudaspace.a
ranlib $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)libaud_src.a
@@ -102,5 +110,9 @@ ifeq ($(WITH_SNDFILE),true)
ranlib $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)libaud_sndfile.a
endif
+ifeq ($(WITH_FFTW3),true)
+ ranlib $(NAN_AUDASPACE)/lib/$(DEBUG_DIR)libaud_fftw.a
+endif
+
endif
@../tools/cpifdiff.sh intern/*.h $(NAN_AUDASPACE)/include/
diff --git a/intern/audaspace/SConscript b/intern/audaspace/SConscript
index 025fa5a2379..ac3f8ca71a0 100644
--- a/intern/audaspace/SConscript
+++ b/intern/audaspace/SConscript
@@ -31,4 +31,9 @@ if env['WITH_BF_SNDFILE']:
incs += ' sndfile ' + env['BF_SNDFILE_INC']
defs.append('WITH_SNDFILE')
+if env['WITH_BF_FFTW3']:
+ sources += env.Glob('fftw/*.cpp')
+ incs += ' fftw ' + env['BF_FFTW3_INC']
+ defs.append('WITH_FFTW3')
+
env.BlenderLib ('bf_audaspace', sources, Split(incs), defs, libtype=['intern','player'], priority = [25,215] )
diff --git a/intern/audaspace/fftw/AUD_BandPassFactory.cpp b/intern/audaspace/fftw/AUD_BandPassFactory.cpp
new file mode 100644
index 00000000000..5a957081208
--- /dev/null
+++ b/intern/audaspace/fftw/AUD_BandPassFactory.cpp
@@ -0,0 +1,79 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#include "AUD_BandPassFactory.h"
+#include "AUD_BandPassReader.h"
+
+AUD_BandPassFactory::AUD_BandPassFactory(AUD_IFactory* factory, float low,
+ float high) :
+ AUD_EffectFactory(factory),
+ m_low(low),
+ m_high(high) {}
+
+AUD_BandPassFactory::AUD_BandPassFactory(float low, float high) :
+ AUD_EffectFactory(0),
+ m_low(low),
+ m_high(high) {}
+
+float AUD_BandPassFactory::getLow()
+{
+ return m_low;
+}
+
+float AUD_BandPassFactory::getHigh()
+{
+ return m_high;
+}
+
+void AUD_BandPassFactory::setLow(float low)
+{
+ m_low = low;
+}
+
+void AUD_BandPassFactory::setHigh(float high)
+{
+ m_high = high;
+}
+
+AUD_IReader* AUD_BandPassFactory::createReader()
+{
+ AUD_IReader* reader = getReader();
+
+ if(reader != 0)
+ {
+ if(reader->getSpecs().format == AUD_FORMAT_FLOAT32)
+ {
+ reader = new AUD_BandPassReader(reader, m_low, m_high);
+ AUD_NEW("reader")
+ }
+ else
+ {
+ delete reader; AUD_DELETE("reader")
+ return 0;
+ }
+ }
+
+ return reader;
+}
diff --git a/intern/audaspace/fftw/AUD_BandPassFactory.h b/intern/audaspace/fftw/AUD_BandPassFactory.h
new file mode 100644
index 00000000000..6e1e686bc3c
--- /dev/null
+++ b/intern/audaspace/fftw/AUD_BandPassFactory.h
@@ -0,0 +1,88 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#ifndef AUD_BANDPASSFACTORY
+#define AUD_BANDPASSFACTORY
+
+#include "AUD_EffectFactory.h"
+
+/**
+ * This factory creates a band pass filter for a sound wave.
+ */
+class AUD_BandPassFactory : public AUD_EffectFactory
+{
+private:
+ /**
+ * The lowest frequency to be passed.
+ */
+ float m_low;
+
+ /**
+ * The highest frequency to be passed.
+ */
+ float m_high;
+
+public:
+ /**
+ * Creates a new band pass factory.
+ * \param factory The input factory.
+ * \param low The lowest passed frequency.
+ * \param high The highest passed frequency.
+ */
+ AUD_BandPassFactory(AUD_IFactory* factory, float low, float high);
+
+ /**
+ * Creates a new band pass factory.
+ * \param low The lowest passed frequency.
+ * \param high The highest passed frequency.
+ */
+ AUD_BandPassFactory(float low, float high);
+
+ /**
+ * Returns the lowest passed frequency.
+ */
+ float getLow();
+
+ /**
+ * Returns the highest passed frequency.
+ */
+ float getHigh();
+
+ /**
+ * Sets the lowest passed frequency.
+ * \param low The lowest passed frequency.
+ */
+ void setLow(float low);
+
+ /**
+ * Sets the highest passed frequency.
+ * \param high The highest passed frequency.
+ */
+	void setHigh(float high);
+
+ virtual AUD_IReader* createReader();
+};
+
+#endif //AUD_BANDPASSFACTORY
diff --git a/intern/audaspace/fftw/AUD_BandPassReader.cpp b/intern/audaspace/fftw/AUD_BandPassReader.cpp
new file mode 100644
index 00000000000..34d7193866f
--- /dev/null
+++ b/intern/audaspace/fftw/AUD_BandPassReader.cpp
@@ -0,0 +1,126 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#include "AUD_BandPassReader.h"
+#include "AUD_Buffer.h"
+
+#include <cstring>
+#include <cstdio>
+
+AUD_BandPassReader::AUD_BandPassReader(AUD_IReader* reader, float low,
+ float high) :
+ AUD_EffectReader(reader), m_low(low), m_high(high)
+{
+ m_buffer = new AUD_Buffer(); AUD_NEW("buffer")
+ m_in = new AUD_Buffer(); AUD_NEW("buffer")
+ m_out = new AUD_Buffer(); AUD_NEW("buffer")
+ m_length = 0;
+}
+
+AUD_BandPassReader::~AUD_BandPassReader()
+{
+ if(m_length != 0)
+ {
+ fftw_destroy_plan(m_forward);
+ fftw_destroy_plan(m_backward);
+ }
+
+ delete m_buffer; AUD_DELETE("buffer")
+ delete m_in; AUD_DELETE("buffer")
+ delete m_out; AUD_DELETE("buffer")
+}
+
+AUD_ReaderType AUD_BandPassReader::getType()
+{
+ return m_reader->getType();
+}
+
+void AUD_BandPassReader::read(int & length, sample_t* & buffer)
+{
+ AUD_Specs specs = m_reader->getSpecs();
+
+ m_reader->read(length, buffer);
+
+ if(length > 0)
+ {
+ if(length * AUD_SAMPLE_SIZE(specs) > m_buffer->getSize())
+ m_buffer->resize(length * AUD_SAMPLE_SIZE(specs));
+
+ if(length != m_length)
+ {
+ if(m_length != 0)
+ {
+ fftw_destroy_plan(m_forward);
+ fftw_destroy_plan(m_backward);
+ }
+
+ m_length = length;
+ printf("WINDOW: %d\n", m_length);
+
+ if(m_length * sizeof(double) > m_in->getSize())
+ {
+ m_in->resize(m_length * sizeof(double));
+ m_out->resize((m_length / 2 + 1) * sizeof(fftw_complex));
+ }
+
+ m_forward = fftw_plan_dft_r2c_1d(m_length,
+ (double*)m_in->getBuffer(),
+ (fftw_complex*)m_out->getBuffer(),
+ FFTW_ESTIMATE);
+ m_backward = fftw_plan_dft_c2r_1d(m_length,
+ (fftw_complex*)m_out->getBuffer(),
+ (double*)m_in->getBuffer(),
+ FFTW_ESTIMATE);
+ }
+
+ float* source = (float*) buffer;
+ double* target = (double*) m_in->getBuffer();
+ float* target2 = (float*) m_buffer->getBuffer();
+ fftw_complex* complex = (fftw_complex*) m_out->getBuffer();
+ float frequency;
+
+ for(int channel = 0; channel < specs.channels; channel++)
+ {
+ for(int i = 0; i < m_length; i++)
+ target[i] = source[i * specs.channels + channel];
+
+ fftw_execute(m_forward);
+
+ for(int i = 0; i < m_length / 2 + 1; i++)
+ {
+ frequency = i * specs.rate / (m_length / 2.0 + 1.0);
+ if((frequency < m_low) || (frequency > m_high))
+ complex[i][0] = complex[i][1] = 0.0;
+ }
+
+ fftw_execute(m_backward);
+
+ for(int i = 0; i < m_length; i++)
+ target2[i * specs.channels + channel] = target[i] / m_length;
+ }
+ }
+
+ buffer = m_buffer->getBuffer();
+}
diff --git a/intern/audaspace/fftw/AUD_BandPassReader.h b/intern/audaspace/fftw/AUD_BandPassReader.h
new file mode 100644
index 00000000000..7a8fd3b94d5
--- /dev/null
+++ b/intern/audaspace/fftw/AUD_BandPassReader.h
@@ -0,0 +1,99 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN LGPL LICENSE BLOCK *****
+ *
+ * Copyright 2009 Jörg Hermann Müller
+ *
+ * This file is part of AudaSpace.
+ *
+ * AudaSpace is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * AudaSpace is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with AudaSpace. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * ***** END LGPL LICENSE BLOCK *****
+ */
+
+#ifndef AUD_BANDPASSREADER
+#define AUD_BANDPASSREADER
+
+#include <fftw3.h>
+
+#include "AUD_EffectReader.h"
+class AUD_Buffer;
+
+/**
+ * This class only passes a specific frequency band of another reader.
+ */
+class AUD_BandPassReader : public AUD_EffectReader
+{
+private:
+ /**
+ * The playback buffer.
+ */
+ AUD_Buffer *m_buffer;
+
+ /**
+ * The input buffer for fourier transformations.
+ */
+ AUD_Buffer *m_in;
+
+ /**
+ * The output buffer for fourier transformations.
+ */
+ AUD_Buffer *m_out;
+
+ /**
+ * The lowest passed frequency.
+ */
+ float m_low;
+
+ /**
+ * The highest passed frequency.
+ */
+ float m_high;
+
+ /**
+ * The fftw plan for forward transformation.
+ */
+ fftw_plan m_forward;
+
+ /**
+ * The fftw plan for backward transformation.
+ */
+ fftw_plan m_backward;
+
+ /**
+ * The length of the plans.
+ */
+ int m_length;
+
+public:
+ /**
+ * Creates a new band pass reader.
+ * \param reader The reader to read from.
+ * \param low The lowest passed frequency.
+ * \param high The highest passed frequency.
+ * \exception AUD_Exception Thrown if the reader specified is NULL.
+ */
+ AUD_BandPassReader(AUD_IReader* reader, float low, float high);
+
+ /**
+ * Destroys the reader.
+ */
+ virtual ~AUD_BandPassReader();
+
+ virtual AUD_ReaderType getType();
+ virtual void read(int & length, sample_t* & buffer);
+};
+
+#endif //AUD_BANDPASSREADER
diff --git a/intern/audaspace/fftw/Makefile b/intern/audaspace/fftw/Makefile
new file mode 100644
index 00000000000..c2d069d068c
--- /dev/null
+++ b/intern/audaspace/fftw/Makefile
@@ -0,0 +1,42 @@
+#
+# $Id$
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
+# All rights reserved.
+#
+# The Original Code is: all of this file.
+#
+# Contributor(s): none yet.
+#
+# ***** END GPL LICENSE BLOCK *****
+#
+#
+
+LIBNAME = aud_fftw
+DIR = $(OCGDIR)/intern/audaspace
+
+include nan_compile.mk
+
+CCFLAGS += $(LEVEL_1_CPP_WARNINGS)
+
+CPPFLAGS += -I../intern
+CPPFLAGS += -I../FX
+CPPFLAGS += -I..
+CPPFLAGS += -I.
+CPPFLAGS += -I$(BF_FFTW3)/include
diff --git a/intern/audaspace/intern/AUD_BufferReader.cpp b/intern/audaspace/intern/AUD_BufferReader.cpp
index 47bf5d3d171..6cea849bdfc 100644
--- a/intern/audaspace/intern/AUD_BufferReader.cpp
+++ b/intern/audaspace/intern/AUD_BufferReader.cpp
@@ -81,7 +81,7 @@ void AUD_BufferReader::read(int & length, sample_t* & buffer)
buffer = m_buffer.get()->getBuffer()+m_position*sample_size;
- // in case the end of the buffer is reach
+ // in case the end of the buffer is reached
if(m_buffer.get()->getSize() < (m_position+length)*sample_size)
length = m_buffer.get()->getSize()/sample_size-m_position;
diff --git a/intern/audaspace/intern/AUD_C-API.cpp b/intern/audaspace/intern/AUD_C-API.cpp
index 255d1d2f1f6..cd6b95b1d9e 100644
--- a/intern/audaspace/intern/AUD_C-API.cpp
+++ b/intern/audaspace/intern/AUD_C-API.cpp
@@ -31,6 +31,7 @@
#include "AUD_LimiterFactory.h"
#include "AUD_PingPongFactory.h"
#include "AUD_LoopFactory.h"
+#include "AUD_RectifyFactory.h"
#include "AUD_ReadDevice.h"
#include "AUD_SourceCaps.h"
#include "AUD_IReader.h"
@@ -285,6 +286,20 @@ int AUD_stopLoop(AUD_Handle* handle)
return false;
}
+AUD_Sound* AUD_rectifySound(AUD_Sound* sound)
+{
+ assert(sound);
+
+ try
+ {
+ return new AUD_RectifyFactory(sound);
+ }
+ catch(AUD_Exception)
+ {
+ return NULL;
+ }
+}
+
void AUD_unload(AUD_Sound* sound)
{
assert(sound);
diff --git a/intern/audaspace/intern/AUD_C-API.h b/intern/audaspace/intern/AUD_C-API.h
index 66a5a5147b3..866f0248cb2 100644
--- a/intern/audaspace/intern/AUD_C-API.h
+++ b/intern/audaspace/intern/AUD_C-API.h
@@ -149,6 +149,13 @@ extern AUD_Sound* AUD_loopSound(AUD_Sound* sound);
*/
extern int AUD_stopLoop(AUD_Handle* handle);
+/**
+ * Rectifies a sound.
+ * \param sound The sound to rectify.
+ * \return A handle of the rectified sound.
+ */
+extern AUD_Sound* AUD_rectifySound(AUD_Sound* sound);
+
/**
* Unloads a sound of any type.
* \param sound The handle of the sound.
diff --git a/intern/audaspace/intern/AUD_ConverterFunctions.cpp b/intern/audaspace/intern/AUD_ConverterFunctions.cpp
index b6d5dffa1a2..1b5d07bcc61 100644
--- a/intern/audaspace/intern/AUD_ConverterFunctions.cpp
+++ b/intern/audaspace/intern/AUD_ConverterFunctions.cpp
@@ -500,3 +500,43 @@ void AUD_volume_adjust_s24_be(sample_t* target, sample_t* source,
}
}
+void AUD_rectify_u8(sample_t* target, sample_t* source, int count)
+{
+	for(int i = 0; i < count; i++)
+		target[i] = source[i] < 0x80 ? 0x100 - source[i] : source[i];
+}
+
+void AUD_rectify_s24_le(sample_t* target, sample_t* source, int count)
+{
+	count *= 3;
+	int value;
+
+	for(int i = 0; i < count; i += 3)
+	{
+		value = source[i+2] << 16 | source[i+1] << 8 | source[i];
+		value |= (((value & 0x800000) >> 23) * 255) << 24;
+		if(value < 0)
+			value = -value;
+		target[i+2] = value >> 16;
+		target[i+1] = value >> 8;
+		target[i] = value;
+	}
+}
+
+void AUD_rectify_s24_be(sample_t* target, sample_t* source, int count)
+{
+ count *= 3;
+ int value;
+
+ for(int i=0; i < count; i+=3)
+ {
+ value = source[i] << 16 | source[i+1] << 8 | source[i+2];
+ value |= (((value & 0x800000) >> 23) * 255) << 24;
+ if(value < 0)
+ value = -value;
+ target[i] = value >> 16;
+ target[i+1] = value >> 8;
+ target[i+2] = value;
+ }
+}
+
diff --git a/intern/audaspace/intern/AUD_ConverterFunctions.h b/intern/audaspace/intern/AUD_ConverterFunctions.h
index c1dd0f4a3a2..686e58704b7 100644
--- a/intern/audaspace/intern/AUD_ConverterFunctions.h
+++ b/intern/audaspace/intern/AUD_ConverterFunctions.h
@@ -46,6 +46,8 @@ typedef void (*AUD_convert_f)(sample_t* target, sample_t* source, int length);
typedef void (*AUD_volume_adjust_f)(sample_t* target, sample_t* source,
int count, float volume);
+typedef void (*AUD_rectify_f)(sample_t* target, sample_t* source, int count);
+
template <class T>
void AUD_convert_copy(sample_t* target, sample_t* source, int length)
{
@@ -153,4 +155,19 @@ void AUD_volume_adjust_s24_le(sample_t* target, sample_t* source,
void AUD_volume_adjust_s24_be(sample_t* target, sample_t* source,
int count, float volume);
+template <class T>
+void AUD_rectify(sample_t* target, sample_t* source, int count)
+{
+ T* t = (T*)target;
+ T* s = (T*)source;
+ for(int i=0; i < count; i++)
+ t[i] = s[i] < 0 ? -s[i] : s[i];
+}
+
+void AUD_rectify_u8(sample_t* target, sample_t* source, int count);
+
+void AUD_rectify_s24_le(sample_t* target, sample_t* source, int count);
+
+void AUD_rectify_s24_be(sample_t* target, sample_t* source, int count);
+
#endif //AUD_CONVERTERFUNCTIONS
diff --git a/intern/audaspace/intern/Makefile b/intern/audaspace/intern/Makefile
index 4bdca04c1cb..8b159f4a639 100644
--- a/intern/audaspace/intern/Makefile
+++ b/intern/audaspace/intern/Makefile
@@ -59,9 +59,10 @@ endif
ifeq ($(WITH_SNDFILE),true)
CPPFLAGS += -DWITH_SNDFILE
CPPFLAGS += -I../sndfile
+ CPPFLAGS += -I$(NAN_SNDFILE)/include
endif
-CPPFLAGS += -I$(LCGDIR)/samplerate/include/
+CPPFLAGS += -I$(NAN_SAMPLERATE)/include/
CPPFLAGS += -I../ffmpeg
CPPFLAGS += -I../FX
CPPFLAGS += -I../SDL
diff --git a/intern/audaspace/make/msvc_9_0/audaspace.vcproj b/intern/audaspace/make/msvc_9_0/audaspace.vcproj
index 7c72ef70f06..37ac98c0cf2 100644
--- a/intern/audaspace/make/msvc_9_0/audaspace.vcproj
+++ b/intern/audaspace/make/msvc_9_0/audaspace.vcproj
@@ -44,7 +44,7 @@
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="2"
- AdditionalIncludeDirectories="..\..;..\..\ffmpeg;..\..\FX;..\..\intern;..\..\OpenAL;..\..\SDL;..\..\SRC;..\..\sndfile;..\..\..\..\..\lib\windows\pthreads\include;..\..\..\..\..\lib\windows\samplerate\include;..\..\..\..\..\lib\windows\ffmpeg\include;..\..\..\..\..\lib\windows\ffmpeg\include\msvc;..\..\..\..\..\lib\windows\sdl\include;..\..\..\..\..\lib\windows\openal\include;..\..\..\..\..\lib\windows\jack\include;..\..\..\..\..\lib\windows\sndfile\include"
+ AdditionalIncludeDirectories="..\..;..\..\ffmpeg;..\..\FX;..\..\intern;..\..\OpenAL;..\..\SDL;..\..\SRC;..\..\sndfile;..\..\..\..\..\lib\windows\pthreads\include;..\..\..\..\..\lib\windows\samplerate\include;..\..\..\..\..\lib\windows\ffmpeg\include;..\..\..\..\..\lib\windows\ffmpeg\include\msvc;..\..\..\..\..\lib\windows\sdl\include;..\..\..\..\..\lib\windows\openal\include;..\..\..\..\..\lib\windows\jack\include;..\..\..\..\..\lib\windows\sndfile\include;..\..\..\..\..\lib\windows\fftw3\include"
PreprocessorDefinitions="WIN32,NDEBUG,_LIB,WITH_FFMPEG,WITH_SDL,WITH_OPENAL"
StringPooling="true"
RuntimeLibrary="0"
@@ -118,7 +118,7 @@
+
+
+
+
+
+
+
+
@@ -762,6 +778,26 @@
>
+
+
+
+
+
+
+
+
+
+
diff --git a/intern/audaspace/sndfile/Makefile b/intern/audaspace/sndfile/Makefile
index 1cf0b2683fb..0012a11203e 100644
--- a/intern/audaspace/sndfile/Makefile
+++ b/intern/audaspace/sndfile/Makefile
@@ -35,6 +35,7 @@ include nan_compile.mk
CCFLAGS += $(LEVEL_1_CPP_WARNINGS)
+CPPFLAGS += -I$(NAN_SNDFILE)/include
CPPFLAGS += -I../intern
CPPFLAGS += -I..
CPPFLAGS += -I.
diff --git a/intern/opennl/superlu/sgstrf.c b/intern/opennl/superlu/sgstrf.c
index 42f8dc9d0ee..e48cc33b00b 100644
--- a/intern/opennl/superlu/sgstrf.c
+++ b/intern/opennl/superlu/sgstrf.c
@@ -283,8 +283,12 @@ sgstrf (superlu_options_t *options, SuperMatrix *A,
* -------------------------------------- */
/* Determine the union of the row structure of the snode */
if ( (*info = ssnode_dfs(jcol, kcol, asub, xa_begin, xa_end,
- xprune, marker, &Glu)) != 0 )
+ xprune, marker, &Glu)) != 0 ) {
+ if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
+ SUPERLU_FREE (iperm_c);
+ SUPERLU_FREE (relax_end);
return;
+ }
nextu = xusub[jcol];
nextlu = xlusup[jcol];
@@ -293,8 +297,12 @@ sgstrf (superlu_options_t *options, SuperMatrix *A,
new_next = nextlu + (xlsub[fsupc+1]-xlsub[fsupc])*(kcol-jcol+1);
nzlumax = Glu.nzlumax;
while ( new_next > nzlumax ) {
- if ( (*info = sLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) )
- return;
+ if ( (*info = sLUMemXpand(jcol, nextlu, LUSUP, &nzlumax, &Glu)) ) {
+ if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
+ SUPERLU_FREE (iperm_c);
+ SUPERLU_FREE (relax_end);
+ return;
+ }
}
for (icol = jcol; icol<= kcol; icol++) {
@@ -350,17 +358,31 @@ sgstrf (superlu_options_t *options, SuperMatrix *A,
if ((*info = scolumn_dfs(m, jj, perm_r, &nseg, &panel_lsub[k],
segrep, &repfnz[k], xprune, marker,
- parent, xplore, &Glu)) != 0) return;
+ parent, xplore, &Glu)) != 0) {
+ if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
+ SUPERLU_FREE (iperm_c);
+ SUPERLU_FREE (relax_end);
+ return;
+ }
/* Numeric updates */
if ((*info = scolumn_bmod(jj, (nseg - nseg1), &dense[k],
tempv, &segrep[nseg1], &repfnz[k],
- jcol, &Glu, stat)) != 0) return;
+ jcol, &Glu, stat)) != 0) {
+ if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
+ SUPERLU_FREE (iperm_c);
+ SUPERLU_FREE (relax_end);
+ return;
+ }
/* Copy the U-segments to ucol[*] */
if ((*info = scopy_to_ucol(jj, nseg, segrep, &repfnz[k],
- perm_r, &dense[k], &Glu)) != 0)
- return;
+ perm_r, &dense[k], &Glu)) != 0) {
+ if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
+ SUPERLU_FREE (iperm_c);
+ SUPERLU_FREE (relax_end);
+ return;
+ }
if ( (*info = spivotL(jj, diag_pivot_thresh, &usepr, perm_r,
iperm_r, iperm_c, &pivrow, &Glu, stat)) )
@@ -429,5 +451,4 @@ sgstrf (superlu_options_t *options, SuperMatrix *A,
if ( iperm_r_allocated ) SUPERLU_FREE (iperm_r);
SUPERLU_FREE (iperm_c);
SUPERLU_FREE (relax_end);
-
}
diff --git a/projectfiles_vc9/blender/editors/ED_editors.vcproj b/projectfiles_vc9/blender/editors/ED_editors.vcproj
index 0956dd7c388..0602112106d 100644
--- a/projectfiles_vc9/blender/editors/ED_editors.vcproj
+++ b/projectfiles_vc9/blender/editors/ED_editors.vcproj
@@ -415,6 +415,10 @@
RelativePath="..\..\..\source\blender\editors\interface\interface_layout.c"
>
+
+
diff --git a/release/scripts/io/export_3ds.py b/release/scripts/io/export_3ds.py
index edbb80b0272..23fd44acc3f 100644
--- a/release/scripts/io/export_3ds.py
+++ b/release/scripts/io/export_3ds.py
@@ -1126,23 +1126,21 @@ class Export3DS(bpy.types.Operator):
def execute(self, context):
save_3ds(self.properties.path, context)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
def poll(self, context): # Poll isnt working yet
return context.active_object != None
-bpy.ops.add(Export3DS)
+bpy.types.register(Export3DS)
# Add to a menu
-import dynamic_menu
-
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".3ds")
self.layout.operator(Export3DS.bl_idname, text="Autodesk 3DS...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
+bpy.types.INFO_MT_file_export.append(menu_func)
diff --git a/release/scripts/io/export_fbx.py b/release/scripts/io/export_fbx.py
index ba2741a7e7e..6407f9b6e49 100644
--- a/release/scripts/io/export_fbx.py
+++ b/release/scripts/io/export_fbx.py
@@ -651,8 +651,8 @@ def write(filename, batch_objects = None, \
}''' % (curtime))
file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime)
- file.write('\nCreator: "Blender3D version 2.5"')
-# file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version'))
+ file.write('\nCreator: "Blender3D version %s"' % bpy.version_string)
+
pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect allong the way
@@ -3361,7 +3361,7 @@ class ExportFBX(bpy.types.Operator):
# to the class instance from the operator settings before calling.
- path = StringProperty(name="File Path", description="File path used for exporting the FBX file", maxlen= 1024, default= "")
+ path = StringProperty(name="File Path", description="File path used for exporting the FBX file", maxlen= 1024, default="")
EXP_OBS_SELECTED = BoolProperty(name="Selected Objects", description="Export selected objects on visible layers", default=True)
# EXP_OBS_SCENE = BoolProperty(name="Scene Objects", description="Export all objects in this scene", default=True)
@@ -3387,7 +3387,7 @@ class ExportFBX(bpy.types.Operator):
BATCH_ENABLE = BoolProperty(name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False)
BATCH_GROUP = BoolProperty(name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False)
BATCH_OWN_DIR = BoolProperty(name="Own Dir", description="Create a dir for each exported file", default=True)
- BATCH_FILE_PREFIX = StringProperty(name="Prefix", description="Prefix each file with this name", maxlen= 1024, default="")
+ BATCH_FILE_PREFIX = StringProperty(name="Prefix", description="Prefix each file with this name", maxlen=1024, default="")
def poll(self, context):
@@ -3426,15 +3426,15 @@ class ExportFBX(bpy.types.Operator):
self.properties.BATCH_FILE_PREFIX,
self.properties.BATCH_OWN_DIR)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(ExportFBX)
+bpy.types.register(ExportFBX)
# if __name__ == "__main__":
# bpy.ops.EXPORT_OT_ply(filename="/tmp/test.ply")
@@ -3462,13 +3462,8 @@ bpy.ops.add(ExportFBX)
# SMALL or COSMETICAL
# - find a way to get blender version, and put it in bpy.util?, old was Blender.Get('version')
-
-# Add to a menu
-import dynamic_menu
-
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".fbx")
self.layout.operator(ExportFBX.bl_idname, text="Autodesk FBX...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
-
+menu_item = bpy.types.INFO_MT_file_export.append(menu_func)
diff --git a/release/scripts/io/export_mdd.py b/release/scripts/io/export_mdd.py
index cfd73b1979e..dd394d7b1eb 100644
--- a/release/scripts/io/export_mdd.py
+++ b/release/scripts/io/export_mdd.py
@@ -96,7 +96,7 @@ def write(filename, sce, ob, PREF_STARTFRAME, PREF_ENDFRAME, PREF_FPS):
numverts = len(me.verts)
- numframes = PREF_ENDFRAME-PREF_STARTFRAME + 1
+ numframes = PREF_ENDFRAME - PREF_STARTFRAME + 1
PREF_FPS = float(PREF_FPS)
f = open(filename, 'wb') #no Errors yet:Safe to create file
@@ -104,7 +104,7 @@ def write(filename, sce, ob, PREF_STARTFRAME, PREF_ENDFRAME, PREF_FPS):
f.write(pack(">2i", numframes, numverts))
# Write the frame times (should we use the time IPO??)
- f.write( pack(">%df" % (numframes), *[frame / PREF_FPS for frame in range(numframes)]) ) # seconds
+ f.write(pack(">%df" % (numframes), *[frame / PREF_FPS for frame in range(numframes)])) # seconds
#rest frame needed to keep frames in sync
"""
@@ -159,8 +159,8 @@ class ExportMDD(bpy.types.Operator):
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
- path = StringProperty(name="File Path", description="File path used for exporting the MDD file", maxlen= 1024, default= "")
- fps = IntProperty(name="Frames Per Second", description="Number of frames/second", min=minfps, max=maxfps, default= 25)
+ path = StringProperty(name="File Path", description="File path used for exporting the MDD file", maxlen=1024)
+ fps = IntProperty(name="Frames Per Second", description="Number of frames/second", min=minfps, max=maxfps, default=25)
start_frame = IntProperty(name="Start Frame", description="Start frame for baking", min=minframe, max=maxframe, default=1)
end_frame = IntProperty(name="End Frame", description="End frame for baking", min=minframe, max=maxframe, default=250)
@@ -173,24 +173,21 @@ class ExportMDD(bpy.types.Operator):
raise Exception("filename not set")
write(self.properties.path, context.scene, context.active_object,
self.properties.start_frame, self.properties.end_frame, self.properties.fps)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(ExportMDD)
-
-# Add to a menu
-import dynamic_menu
+bpy.types.register(ExportMDD)
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".mdd")
self.layout.operator(ExportMDD.bl_idname, text="Vertex Keyframe Animation (.mdd)...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
+bpy.types.INFO_MT_file_export.append(menu_func)
if __name__ == '__main__':
bpy.ops.export.mdd(path="/tmp/test.mdd")
diff --git a/release/scripts/io/export_obj.py b/release/scripts/io/export_obj.py
index d6026d10ba9..6135375d185 100644
--- a/release/scripts/io/export_obj.py
+++ b/release/scripts/io/export_obj.py
@@ -40,29 +40,6 @@ All objects that can be represented as a mesh (mesh, curve, metaball, surface, t
will be exported as mesh data.
"""
-
-# --------------------------------------------------------------------------
-# OBJ Export v1.1 by Campbell Barton (AKA Ideasman)
-# --------------------------------------------------------------------------
-# ***** BEGIN GPL LICENSE BLOCK *****
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# ***** END GPL LICENCE BLOCK *****
-# --------------------------------------------------------------------------
-
# import math and other in functions that use them for the sake of fast Blender startup
# import math
import os
@@ -384,8 +361,7 @@ def write(filename, objects, scene,
file = open(filename, "w")
# Write Header
- version = "2.5"
- file.write('# Blender3D v%s OBJ File: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] ))
+ file.write('# Blender3D v%s OBJ File: %s\n' % (bpy.version_string, bpy.data.filename.split('/')[-1].split('\\')[-1] ))
file.write('# www.blender3d.org\n')
# Tell the obj file what material file to use.
@@ -444,7 +420,7 @@ def write(filename, objects, scene,
me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW')
if EXPORT_ROTX90:
- me.transform(ob_mat * mat_xrot90)
+ me.transform(mat_xrot90 * ob_mat)
else:
me.transform(ob_mat)
@@ -980,26 +956,20 @@ class ExportOBJ(bpy.types.Operator):
EXPORT_SEL_ONLY=self.properties.use_selection,
EXPORT_ALL_SCENES=self.properties.use_all_scenes)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-
-
-
-
-bpy.ops.add(ExportOBJ)
-
-import dynamic_menu
+bpy.types.register(ExportOBJ)
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".obj")
self.layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj)...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
+menu_item = bpy.types.INFO_MT_file_export.append(menu_func)
if __name__ == "__main__":
bpy.ops.EXPORT_OT_obj(filename="/tmp/test.obj")
diff --git a/release/scripts/io/export_ply.py b/release/scripts/io/export_ply.py
index 858e22952b8..3c9e1f86ee2 100644
--- a/release/scripts/io/export_ply.py
+++ b/release/scripts/io/export_ply.py
@@ -180,7 +180,7 @@ def write(filename, scene, ob, \
normal_key = rvec3d(normal)
if faceUV:
- uvcoord = uv[j][0], 1.0-uv[j][1]
+ uvcoord = uv[j][0], 1.0 - uv[j][1]
uvcoord_key = rvec2d(uvcoord)
elif vertexUV:
uvcoord = v.uvco[0], 1.0 - v.uvco[1]
@@ -205,8 +205,7 @@ def write(filename, scene, ob, \
file.write('ply\n')
file.write('format ascii 1.0\n')
- version = "2.5" # Blender.Get('version')
- file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1]))
+ file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (bpy.version_string, bpy.data.filename.split('/')[-1].split('\\')[-1]))
file.write('element vertex %d\n' % len(ply_verts))
@@ -246,7 +245,7 @@ def write(filename, scene, ob, \
file.write('\n')
for pf in ply_faces:
- if len(pf)==3:
+ if len(pf) == 3:
file.write('3 %d %d %d\n' % tuple(pf))
else:
file.write('4 %d %d %d %d\n' % tuple(pf))
@@ -297,12 +296,12 @@ class ExportPLY(bpy.types.Operator):
EXPORT_COLORS=self.properties.use_colors,
)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
def draw(self, context):
layout = self.layout
@@ -316,16 +315,15 @@ class ExportPLY(bpy.types.Operator):
row.prop(props, "use_colors")
-bpy.ops.add(ExportPLY)
-
-import dynamic_menu
+bpy.types.register(ExportPLY)
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".ply")
self.layout.operator(ExportPLY.bl_idname, text="Stanford (.ply)...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
+bpy.types.INFO_MT_file_export.append(menu_func)
if __name__ == "__main__":
bpy.ops.export.ply(path="/tmp/test.ply")
+
diff --git a/release/scripts/io/export_x3d.py b/release/scripts/io/export_x3d.py
index 6cfe0f45626..4c3540a458e 100644
--- a/release/scripts/io/export_x3d.py
+++ b/release/scripts/io/export_x3d.py
@@ -1233,22 +1233,21 @@ class ExportX3D(bpy.types.Operator):
def execute(self, context):
x3d_export(self.properties.path, context, self.properties.apply_modifiers, self.properties.triangulate, self.properties.compress)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(ExportX3D)
+bpy.types.register(ExportX3D)
-import dynamic_menu
def menu_func(self, context):
default_path = bpy.data.filename.replace(".blend", ".x3d")
self.layout.operator(ExportX3D.bl_idname, text="X3D Extensible 3D (.x3d)...").path = default_path
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_export, menu_func)
+bpy.types.INFO_MT_file_export.append(menu_func)
# NOTES
# - blender version is hardcoded
diff --git a/release/scripts/io/import_anim_bvh.py b/release/scripts/io/import_anim_bvh.py
index 30ff99c1fe8..2afd2db3cb2 100644
--- a/release/scripts/io/import_anim_bvh.py
+++ b/release/scripts/io/import_anim_bvh.py
@@ -887,17 +887,15 @@ class BvhImporter(bpy.types.Operator):
read_bvh(context, self.properties.path)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(BvhImporter)
+bpy.types.register(BvhImporter)
-
-import dynamic_menu
menu_func = lambda self, context: self.layout.operator(BvhImporter.bl_idname, text="Motion Capture (.bvh)...")
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_import, menu_func)
+bpy.types.INFO_MT_file_import.append(menu_func)
diff --git a/release/scripts/io/import_scene_3ds.py b/release/scripts/io/import_scene_3ds.py
index 4744c5df01a..4420f6ef6f0 100644
--- a/release/scripts/io/import_scene_3ds.py
+++ b/release/scripts/io/import_scene_3ds.py
@@ -1151,7 +1151,7 @@ class IMPORT_OT_autodesk_3ds(bpy.types.Operator):
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
- path = StringProperty(name="File Path", description="File path used for importing the 3DS file", maxlen= 1024, default= ""),
+ path = StringProperty(name="File Path", description="File path used for importing the 3DS file", maxlen= 1024, default= "")
# size_constraint = FloatProperty(name="Size Constraint", description="Scale the model by 10 until it reacehs the size constraint. Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0),
# search_images = BoolProperty(name="Image Search", description="Search subdirectories for any assosiated images (Warning, may be slow)", default=True),
@@ -1159,19 +1159,18 @@ class IMPORT_OT_autodesk_3ds(bpy.types.Operator):
def execute(self, context):
load_3ds(self.properties.path, context, 0.0, False, False)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(IMPORT_OT_autodesk_3ds)
+bpy.types.register(IMPORT_OT_autodesk_3ds)
-import dynamic_menu
menu_func = lambda self, context: self.layout.operator(IMPORT_OT_autodesk_3ds.bl_idname, text="3D Studio (.3ds)...")
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_import, menu_func)
+bpy.types.INFO_MT_file_import.append(menu_func)
# NOTES:
-# why add 1 extra vertex? and remove it when done?
+# why add 1 extra vertex? and remove it when done? - "Answer - eekadoodle - would need to re-order UV's without this since face order isnt always what we give blender, BMesh will solve :D"
# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)
diff --git a/release/scripts/io/import_scene_obj.py b/release/scripts/io/import_scene_obj.py
index cbd2b831e55..228084561b8 100644
--- a/release/scripts/io/import_scene_obj.py
+++ b/release/scripts/io/import_scene_obj.py
@@ -1616,20 +1616,19 @@ class IMPORT_OT_obj(bpy.types.Operator):
self.properties.IMAGE_SEARCH,
self.properties.POLYGROUPS)
- return ('FINISHED',)
+ return {'FINISHED'}
def invoke(self, context, event):
wm = context.manager
wm.add_fileselect(self)
- return ('RUNNING_MODAL',)
+ return {'RUNNING_MODAL'}
-bpy.ops.add(IMPORT_OT_obj)
+bpy.types.register(IMPORT_OT_obj)
-import dynamic_menu
menu_func = lambda self, context: self.layout.operator(IMPORT_OT_obj.bl_idname, text="Wavefront (.obj)...")
-menu_item = dynamic_menu.add(bpy.types.INFO_MT_file_import, menu_func)
+menu_item = bpy.types.INFO_MT_file_import.append(menu_func)
# NOTES (all line numbers refer to 2.4x import_obj.py, not this file)
diff --git a/release/scripts/io/netrender/__init__.py b/release/scripts/io/netrender/__init__.py
index 9e48164a5eb..77ab95535aa 100644
--- a/release/scripts/io/netrender/__init__.py
+++ b/release/scripts/io/netrender/__init__.py
@@ -4,12 +4,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
diff --git a/release/scripts/io/netrender/balancing.py b/release/scripts/io/netrender/balancing.py
index d8951b1e2e8..e09b19f956c 100644
--- a/release/scripts/io/netrender/balancing.py
+++ b/release/scripts/io/netrender/balancing.py
@@ -4,12 +4,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -22,129 +22,129 @@ from netrender.utils import *
import netrender.model
class RatingRule:
- def rate(self, job):
- return 0
+ def rate(self, job):
+ return 0
class ExclusionRule:
- def test(self, job):
- return False
+ def test(self, job):
+ return False
class PriorityRule:
- def test(self, job):
- return False
+ def test(self, job):
+ return False
class Balancer:
- def __init__(self):
- self.rules = []
- self.priorities = []
- self.exceptions = []
-
- def addRule(self, rule):
- self.rules.append(rule)
-
- def addPriority(self, priority):
- self.priorities.append(priority)
-
- def addException(self, exception):
- self.exceptions.append(exception)
-
- def applyRules(self, job):
- return sum((rule.rate(job) for rule in self.rules))
-
- def applyPriorities(self, job):
- for priority in self.priorities:
- if priority.test(job):
- return True # priorities are first
-
- return False
-
- def applyExceptions(self, job):
- for exception in self.exceptions:
- if exception.test(job):
- return True # exceptions are last
-
- return False
-
- def sortKey(self, job):
- return (1 if self.applyExceptions(job) else 0, # exceptions after
- 0 if self.applyPriorities(job) else 1, # priorities first
- self.applyRules(job))
-
- def balance(self, jobs):
- if jobs:
- # use inline copy to make sure the list is still accessible while sorting
- jobs[:] = sorted(jobs, key=self.sortKey)
- return jobs[0]
- else:
- return None
-
+ def __init__(self):
+ self.rules = []
+ self.priorities = []
+ self.exceptions = []
+
+ def addRule(self, rule):
+ self.rules.append(rule)
+
+ def addPriority(self, priority):
+ self.priorities.append(priority)
+
+ def addException(self, exception):
+ self.exceptions.append(exception)
+
+ def applyRules(self, job):
+ return sum((rule.rate(job) for rule in self.rules))
+
+ def applyPriorities(self, job):
+ for priority in self.priorities:
+ if priority.test(job):
+ return True # priorities are first
+
+ return False
+
+ def applyExceptions(self, job):
+ for exception in self.exceptions:
+ if exception.test(job):
+ return True # exceptions are last
+
+ return False
+
+ def sortKey(self, job):
+ return (1 if self.applyExceptions(job) else 0, # exceptions after
+ 0 if self.applyPriorities(job) else 1, # priorities first
+ self.applyRules(job))
+
+ def balance(self, jobs):
+ if jobs:
+ # use inline copy to make sure the list is still accessible while sorting
+ jobs[:] = sorted(jobs, key=self.sortKey)
+ return jobs[0]
+ else:
+ return None
+
# ==========================
class RatingUsage(RatingRule):
- def __str__(self):
- return "Usage rating"
-
- def rate(self, job):
- # less usage is better
- return job.usage / job.priority
+ def __str__(self):
+ return "Usage rating"
+
+ def rate(self, job):
+ # less usage is better
+ return job.usage / job.priority
class RatingUsageByCategory(RatingRule):
- def __str__(self):
- return "Usage per category rating"
-
- def __init__(self, get_jobs):
- self.getJobs = get_jobs
- def rate(self, job):
- total_category_usage = sum([j.usage for j in self.getJobs() if j.category == job.category])
- maximum_priority = max([j.priority for j in self.getJobs() if j.category == job.category])
-
- # less usage is better
- return total_category_usage / maximum_priority
-
-class NewJobPriority(PriorityRule):
- def str_limit(self):
- return "less than %i frame%s done" % (self.limit, "s" if self.limit > 1 else "")
+ def __str__(self):
+ return "Usage per category rating"
- def __str__(self):
- return "Priority to new jobs"
-
- def __init__(self, limit = 1):
- self.limit = limit
-
- def test(self, job):
- return job.countFrames(status = DONE) < self.limit
+ def __init__(self, get_jobs):
+ self.getJobs = get_jobs
+ def rate(self, job):
+ total_category_usage = sum([j.usage for j in self.getJobs() if j.category == job.category])
+ maximum_priority = max([j.priority for j in self.getJobs() if j.category == job.category])
+
+ # less usage is better
+ return total_category_usage / maximum_priority
+
+class NewJobPriority(PriorityRule):
+ def str_limit(self):
+ return "less than %i frame%s done" % (self.limit, "s" if self.limit > 1 else "")
+
+ def __str__(self):
+ return "Priority to new jobs"
+
+ def __init__(self, limit = 1):
+ self.limit = limit
+
+ def test(self, job):
+ return job.countFrames(status = DONE) < self.limit
class MinimumTimeBetweenDispatchPriority(PriorityRule):
- def str_limit(self):
- return "more than %i minute%s since last" % (self.limit, "s" if self.limit > 1 else "")
+ def str_limit(self):
+ return "more than %i minute%s since last" % (self.limit, "s" if self.limit > 1 else "")
- def __str__(self):
- return "Priority to jobs that haven't been dispatched recently"
-
- def __init__(self, limit = 10):
- self.limit = limit
-
- def test(self, job):
- return job.countFrames(status = DISPATCHED) == 0 and (time.time() - job.last_dispatched) / 60 > self.limit
+ def __str__(self):
+ return "Priority to jobs that haven't been dispatched recently"
+
+ def __init__(self, limit = 10):
+ self.limit = limit
+
+ def test(self, job):
+ return job.countFrames(status = DISPATCHED) == 0 and (time.time() - job.last_dispatched) / 60 > self.limit
class ExcludeQueuedEmptyJob(ExclusionRule):
- def __str__(self):
- return "Exclude queued and empty jobs"
-
- def test(self, job):
- return job.status != JOB_QUEUED or job.countFrames(status = QUEUED) == 0
-
-class ExcludeSlavesLimit(ExclusionRule):
- def str_limit(self):
- return "more than %.0f%% of all slaves" % (self.limit * 100)
+ def __str__(self):
+ return "Exclude queued and empty jobs"
- def __str__(self):
- return "Exclude jobs that would use too many slaves"
-
- def __init__(self, count_jobs, count_slaves, limit = 0.75):
- self.count_jobs = count_jobs
- self.count_slaves = count_slaves
- self.limit = limit
-
- def test(self, job):
- return not ( self.count_jobs() == 1 or self.count_slaves() <= 1 or float(job.countSlaves() + 1) / self.count_slaves() <= self.limit )
+ def test(self, job):
+ return job.status != JOB_QUEUED or job.countFrames(status = QUEUED) == 0
+
+class ExcludeSlavesLimit(ExclusionRule):
+ def str_limit(self):
+ return "more than %.0f%% of all slaves" % (self.limit * 100)
+
+ def __str__(self):
+ return "Exclude jobs that would use too many slaves"
+
+ def __init__(self, count_jobs, count_slaves, limit = 0.75):
+ self.count_jobs = count_jobs
+ self.count_slaves = count_slaves
+ self.limit = limit
+
+ def test(self, job):
+ return not ( self.count_jobs() == 1 or self.count_slaves() <= 1 or float(job.countSlaves() + 1) / self.count_slaves() <= self.limit )
diff --git a/release/scripts/io/netrender/client.py b/release/scripts/io/netrender/client.py
index bbbb9f26051..d71d6a072ba 100644
--- a/release/scripts/io/netrender/client.py
+++ b/release/scripts/io/netrender/client.py
@@ -4,12 +4,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -28,253 +28,253 @@ import netrender.master as master
from netrender.utils import *
def addFluidFiles(job, path):
- if os.path.exists(path):
- pattern = re.compile("fluidsurface_(final|preview)_([0-9]+)\.(bobj|bvel)\.gz")
+ if os.path.exists(path):
+ pattern = re.compile("fluidsurface_(final|preview)_([0-9]+)\.(bobj|bvel)\.gz")
- for fluid_file in sorted(os.listdir(path)):
- match = pattern.match(fluid_file)
-
- if match:
- # fluid frames starts at 0, which explains the +1
- # This is stupid
- current_frame = int(match.groups()[1]) + 1
- job.addFile(path + fluid_file, current_frame, current_frame)
+ for fluid_file in sorted(os.listdir(path)):
+ match = pattern.match(fluid_file)
+
+ if match:
+ # fluid frames starts at 0, which explains the +1
+ # This is stupid
+ current_frame = int(match.groups()[1]) + 1
+ job.addFile(path + fluid_file, current_frame, current_frame)
def addPointCache(job, ob, point_cache, default_path):
- if not point_cache.disk_cache:
- return
-
-
- name = point_cache.name
- if name == "":
- name = "".join(["%02X" % ord(c) for c in ob.name])
-
- cache_path = bpy.utils.expandpath(point_cache.filepath) if point_cache.external else default_path
-
- index = "%02i" % point_cache.index
-
- if os.path.exists(cache_path):
- pattern = re.compile(name + "_([0-9]+)_" + index + "\.bphys")
-
- cache_files = []
+ if not point_cache.disk_cache:
+ return
+
+
+ name = point_cache.name
+ if name == "":
+ name = "".join(["%02X" % ord(c) for c in ob.name])
+
+ cache_path = bpy.utils.expandpath(point_cache.filepath) if point_cache.external else default_path
+
+ index = "%02i" % point_cache.index
+
+ if os.path.exists(cache_path):
+ pattern = re.compile(name + "_([0-9]+)_" + index + "\.bphys")
+
+ cache_files = []
+
+ for cache_file in sorted(os.listdir(cache_path)):
+ match = pattern.match(cache_file)
+
+ if match:
+ cache_frame = int(match.groups()[0])
+ cache_files.append((cache_frame, cache_file))
+
+ cache_files.sort()
+
+ if len(cache_files) == 1:
+ cache_frame, cache_file = cache_files[0]
+ job.addFile(cache_path + cache_file, cache_frame, cache_frame)
+ else:
+ for i in range(len(cache_files)):
+ current_item = cache_files[i]
+ next_item = cache_files[i+1] if i + 1 < len(cache_files) else None
+ previous_item = cache_files[i - 1] if i > 0 else None
+
+ current_frame, current_file = current_item
+
+ if not next_item and not previous_item:
+ job.addFile(cache_path + current_file, current_frame, current_frame)
+ elif next_item and not previous_item:
+ next_frame = next_item[0]
+ job.addFile(cache_path + current_file, current_frame, next_frame - 1)
+ elif not next_item and previous_item:
+ previous_frame = previous_item[0]
+ job.addFile(cache_path + current_file, previous_frame + 1, current_frame)
+ else:
+ next_frame = next_item[0]
+ previous_frame = previous_item[0]
+ job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1)
- for cache_file in sorted(os.listdir(cache_path)):
- match = pattern.match(cache_file)
-
- if match:
- cache_frame = int(match.groups()[0])
- cache_files.append((cache_frame, cache_file))
-
- cache_files.sort()
-
- if len(cache_files) == 1:
- cache_frame, cache_file = cache_files[0]
- job.addFile(cache_path + cache_file, cache_frame, cache_frame)
- else:
- for i in range(len(cache_files)):
- current_item = cache_files[i]
- next_item = cache_files[i+1] if i + 1 < len(cache_files) else None
- previous_item = cache_files[i - 1] if i > 0 else None
-
- current_frame, current_file = current_item
-
- if not next_item and not previous_item:
- job.addFile(cache_path + current_file, current_frame, current_frame)
- elif next_item and not previous_item:
- next_frame = next_item[0]
- job.addFile(cache_path + current_file, current_frame, next_frame - 1)
- elif not next_item and previous_item:
- previous_frame = previous_item[0]
- job.addFile(cache_path + current_file, previous_frame + 1, current_frame)
- else:
- next_frame = next_item[0]
- previous_frame = previous_item[0]
- job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1)
-
def clientSendJob(conn, scene, anim = False):
- netsettings = scene.network_render
- job = netrender.model.RenderJob()
-
- if anim:
- for f in range(scene.start_frame, scene.end_frame + 1):
- job.addFrame(f)
- else:
- job.addFrame(scene.current_frame)
-
- filename = bpy.data.filename
- job.addFile(filename)
-
- job_name = netsettings.job_name
- path, name = os.path.split(filename)
- if job_name == "[default]":
- job_name = name
-
- ###########################
- # LIBRARIES
- ###########################
- for lib in bpy.data.libraries:
- job.addFile(bpy.utils.expandpath(lib.filename))
-
- ###########################
- # IMAGES
- ###########################
- for image in bpy.data.images:
- if image.source == "FILE" and not image.packed_file:
- job.addFile(bpy.utils.expandpath(image.filename))
-
- ###########################
- # FLUID + POINT CACHE
- ###########################
- root, ext = os.path.splitext(name)
- default_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that
+ netsettings = scene.network_render
+ job = netrender.model.RenderJob()
- for object in bpy.data.objects:
- for modifier in object.modifiers:
- if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
- addFluidFiles(job, bpy.utils.expandpath(modifier.settings.path))
- elif modifier.type == "CLOTH":
- addPointCache(job, object, modifier.point_cache, default_path)
- elif modifier.type == "SOFT_BODY":
- addPointCache(job, object, modifier.point_cache, default_path)
- elif modifier.type == "SMOKE" and modifier.smoke_type == "TYPE_DOMAIN":
- addPointCache(job, object, modifier.domain_settings.point_cache_low, default_path)
- if modifier.domain_settings.highres:
- addPointCache(job, object, modifier.domain_settings.point_cache_high, default_path)
+ if anim:
+ for f in range(scene.start_frame, scene.end_frame + 1):
+ job.addFrame(f)
+ else:
+ job.addFrame(scene.current_frame)
- # particles modifier are stupid and don't contain data
- # we have to go through the object property
- for psys in object.particle_systems:
- addPointCache(job, object, psys.point_cache, default_path)
-
- #print(job.files)
-
- job.name = job_name
- job.category = netsettings.job_category
-
- for slave in netrender.blacklist:
- job.blacklist.append(slave.id)
-
- job.chunks = netsettings.chunks
- job.priority = netsettings.priority
-
- # try to send path first
- conn.request("POST", "/job", repr(job.serialize()))
- response = conn.getresponse()
-
- job_id = response.getheader("job-id")
-
- # if not ACCEPTED (but not processed), send files
- if response.status == http.client.ACCEPTED:
- for rfile in job.files:
- f = open(rfile.filepath, "rb")
- conn.request("PUT", fileURL(job_id, rfile.index), f)
- f.close()
- response = conn.getresponse()
-
- # server will reply with ACCEPTED until all files are found
-
- return job_id
+ filename = bpy.data.filename
+ job.addFile(filename)
+
+ job_name = netsettings.job_name
+ path, name = os.path.split(filename)
+ if job_name == "[default]":
+ job_name = name
+
+ ###########################
+ # LIBRARIES
+ ###########################
+ for lib in bpy.data.libraries:
+ job.addFile(bpy.utils.expandpath(lib.filename))
+
+ ###########################
+ # IMAGES
+ ###########################
+ for image in bpy.data.images:
+ if image.source == "FILE" and not image.packed_file:
+ job.addFile(bpy.utils.expandpath(image.filename))
+
+ ###########################
+ # FLUID + POINT CACHE
+ ###########################
+ root, ext = os.path.splitext(name)
+ default_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that
+
+ for object in bpy.data.objects:
+ for modifier in object.modifiers:
+ if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
+ addFluidFiles(job, bpy.utils.expandpath(modifier.settings.path))
+ elif modifier.type == "CLOTH":
+ addPointCache(job, object, modifier.point_cache, default_path)
+ elif modifier.type == "SOFT_BODY":
+ addPointCache(job, object, modifier.point_cache, default_path)
+ elif modifier.type == "SMOKE" and modifier.smoke_type == "TYPE_DOMAIN":
+ addPointCache(job, object, modifier.domain_settings.point_cache_low, default_path)
+ if modifier.domain_settings.highres:
+ addPointCache(job, object, modifier.domain_settings.point_cache_high, default_path)
+
+ # particles modifier are stupid and don't contain data
+ # we have to go through the object property
+ for psys in object.particle_systems:
+ addPointCache(job, object, psys.point_cache, default_path)
+
+ #print(job.files)
+
+ job.name = job_name
+ job.category = netsettings.job_category
+
+ for slave in netrender.blacklist:
+ job.blacklist.append(slave.id)
+
+ job.chunks = netsettings.chunks
+ job.priority = netsettings.priority
+
+ # try to send path first
+ conn.request("POST", "/job", repr(job.serialize()))
+ response = conn.getresponse()
+
+ job_id = response.getheader("job-id")
+
+ # if not ACCEPTED (but not processed), send files
+ if response.status == http.client.ACCEPTED:
+ for rfile in job.files:
+ f = open(rfile.filepath, "rb")
+ conn.request("PUT", fileURL(job_id, rfile.index), f)
+ f.close()
+ response = conn.getresponse()
+
+ # server will reply with ACCEPTED until all files are found
+
+ return job_id
def requestResult(conn, job_id, frame):
- conn.request("GET", renderURL(job_id, frame))
+ conn.request("GET", renderURL(job_id, frame))
@rnaType
class NetworkRenderEngine(bpy.types.RenderEngine):
- bl_idname = 'NET_RENDER'
- bl_label = "Network Render"
- def render(self, scene):
- if scene.network_render.mode == "RENDER_CLIENT":
- self.render_client(scene)
- elif scene.network_render.mode == "RENDER_SLAVE":
- self.render_slave(scene)
- elif scene.network_render.mode == "RENDER_MASTER":
- self.render_master(scene)
- else:
- print("UNKNOWN OPERATION MODE")
-
- def render_master(self, scene):
- netsettings = scene.network_render
-
- address = "" if netsettings.server_address == "[default]" else netsettings.server_address
-
- master.runMaster((address, netsettings.server_port), netsettings.server_broadcast, netsettings.path, self.update_stats, self.test_break)
+ bl_idname = 'NET_RENDER'
+ bl_label = "Network Render"
+ def render(self, scene):
+ if scene.network_render.mode == "RENDER_CLIENT":
+ self.render_client(scene)
+ elif scene.network_render.mode == "RENDER_SLAVE":
+ self.render_slave(scene)
+ elif scene.network_render.mode == "RENDER_MASTER":
+ self.render_master(scene)
+ else:
+ print("UNKNOWN OPERATION MODE")
+
+ def render_master(self, scene):
+ netsettings = scene.network_render
+
+ address = "" if netsettings.server_address == "[default]" else netsettings.server_address
+
+ master.runMaster((address, netsettings.server_port), netsettings.server_broadcast, netsettings.path, self.update_stats, self.test_break)
- def render_slave(self, scene):
- slave.render_slave(self, scene.network_render)
-
- def render_client(self, scene):
- netsettings = scene.network_render
- self.update_stats("", "Network render client initiation")
-
-
- conn = clientConnection(netsettings.server_address, netsettings.server_port)
-
- if conn:
- # Sending file
-
- self.update_stats("", "Network render exporting")
-
- new_job = False
-
- job_id = netsettings.job_id
-
- # reading back result
-
- self.update_stats("", "Network render waiting for results")
-
- requestResult(conn, job_id, scene.current_frame)
- response = conn.getresponse()
-
- if response.status == http.client.NO_CONTENT:
- new_job = True
- netsettings.job_id = clientSendJob(conn, scene)
- job_id = netsettings.job_id
-
- requestResult(conn, job_id, scene.current_frame)
- response = conn.getresponse()
-
- while response.status == http.client.ACCEPTED and not self.test_break():
- time.sleep(1)
- requestResult(conn, job_id, scene.current_frame)
- response = conn.getresponse()
-
- # cancel new jobs (animate on network) on break
- if self.test_break() and new_job:
- conn.request("POST", cancelURL(job_id))
- response = conn.getresponse()
- print( response.status, response.reason )
- netsettings.job_id = 0
-
- if response.status != http.client.OK:
- conn.close()
- return
-
- r = scene.render_data
- x= int(r.resolution_x*r.resolution_percentage*0.01)
- y= int(r.resolution_y*r.resolution_percentage*0.01)
-
- f = open(netsettings.path + "output.exr", "wb")
- buf = response.read(1024)
-
- while buf:
- f.write(buf)
- buf = response.read(1024)
-
- f.close()
-
- result = self.begin_result(0, 0, x, y)
- result.load_from_file(netsettings.path + "output.exr", 0, 0)
- self.end_result(result)
-
- conn.close()
+ def render_slave(self, scene):
+ slave.render_slave(self, scene.network_render)
+
+ def render_client(self, scene):
+ netsettings = scene.network_render
+ self.update_stats("", "Network render client initiation")
+
+
+ conn = clientConnection(netsettings.server_address, netsettings.server_port)
+
+ if conn:
+ # Sending file
+
+ self.update_stats("", "Network render exporting")
+
+ new_job = False
+
+ job_id = netsettings.job_id
+
+ # reading back result
+
+ self.update_stats("", "Network render waiting for results")
+
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ if response.status == http.client.NO_CONTENT:
+ new_job = True
+ netsettings.job_id = clientSendJob(conn, scene)
+ job_id = netsettings.job_id
+
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ while response.status == http.client.ACCEPTED and not self.test_break():
+ time.sleep(1)
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ # cancel new jobs (animate on network) on break
+ if self.test_break() and new_job:
+ conn.request("POST", cancelURL(job_id))
+ response = conn.getresponse()
+ print( response.status, response.reason )
+ netsettings.job_id = 0
+
+ if response.status != http.client.OK:
+ conn.close()
+ return
+
+ r = scene.render_data
+ x= int(r.resolution_x*r.resolution_percentage*0.01)
+ y= int(r.resolution_y*r.resolution_percentage*0.01)
+
+ f = open(netsettings.path + "output.exr", "wb")
+ buf = response.read(1024)
+
+ while buf:
+ f.write(buf)
+ buf = response.read(1024)
+
+ f.close()
+
+ result = self.begin_result(0, 0, x, y)
+ result.load_from_file(netsettings.path + "output.exr", 0, 0)
+ self.end_result(result)
+
+ conn.close()
def compatible(module):
- module = __import__(module)
- for subclass in module.__dict__.values():
- try: subclass.COMPAT_ENGINES.add('NET_RENDER')
- except: pass
- del module
+ module = __import__(module)
+ for subclass in module.__dict__.values():
+ try: subclass.COMPAT_ENGINES.add('NET_RENDER')
+ except: pass
+ del module
compatible("properties_render")
compatible("properties_world")
diff --git a/release/scripts/io/netrender/master.py b/release/scripts/io/netrender/master.py
index 83980c95da7..a33606d19a0 100644
--- a/release/scripts/io/netrender/master.py
+++ b/release/scripts/io/netrender/master.py
@@ -4,12 +4,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -26,135 +26,135 @@ import netrender.balancing
import netrender.master_html
class MRenderFile(netrender.model.RenderFile):
- def __init__(self, filepath, index, start, end):
- super().__init__(filepath, index, start, end)
- self.found = False
-
- def test(self):
- self.found = os.path.exists(self.filepath)
- return self.found
+ def __init__(self, filepath, index, start, end):
+ super().__init__(filepath, index, start, end)
+ self.found = False
+
+ def test(self):
+ self.found = os.path.exists(self.filepath)
+ return self.found
class MRenderSlave(netrender.model.RenderSlave):
- def __init__(self, name, address, stats):
- super().__init__()
- self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
- self.name = name
- self.address = address
- self.stats = stats
- self.last_seen = time.time()
-
- self.job = None
- self.job_frames = []
-
- netrender.model.RenderSlave._slave_map[self.id] = self
+ def __init__(self, name, address, stats):
+ super().__init__()
+ self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
+ self.name = name
+ self.address = address
+ self.stats = stats
+ self.last_seen = time.time()
- def seen(self):
- self.last_seen = time.time()
-
- def finishedFrame(self, frame_number):
- self.job_frames.remove(frame_number)
- if not self.job_frames:
- self.job = None
+ self.job = None
+ self.job_frames = []
+
+ netrender.model.RenderSlave._slave_map[self.id] = self
+
+ def seen(self):
+ self.last_seen = time.time()
+
+ def finishedFrame(self, frame_number):
+ self.job_frames.remove(frame_number)
+ if not self.job_frames:
+ self.job = None
class MRenderJob(netrender.model.RenderJob):
- def __init__(self, job_id, job_info):
- super().__init__(job_info)
- self.id = job_id
- self.last_dispatched = time.time()
-
- # force one chunk for process jobs
- if self.type == netrender.model.JOB_PROCESS:
- self.chunks = 1
-
- # Force WAITING status on creation
- self.status = JOB_WAITING
+ def __init__(self, job_id, job_info):
+ super().__init__(job_info)
+ self.id = job_id
+ self.last_dispatched = time.time()
- # special server properties
- self.last_update = 0
- self.save_path = ""
- self.files = [MRenderFile(rfile.filepath, rfile.index, rfile.start, rfile.end) for rfile in job_info.files]
-
- def save(self):
- if self.save_path:
- f = open(self.save_path + "job.txt", "w")
- f.write(repr(self.serialize()))
- f.close()
-
- def edit(self, info_map):
- if "status" in info_map:
- self.status = info_map["status"]
-
- if "priority" in info_map:
- self.priority = info_map["priority"]
-
- if "chunks" in info_map:
- self.chunks = info_map["chunks"]
-
- def testStart(self):
- for f in self.files:
- if not f.test():
- return False
-
- self.start()
- return True
-
- def testFinished(self):
- for f in self.frames:
- if f.status == QUEUED or f.status == DISPATCHED:
- break
- else:
- self.status = JOB_FINISHED
+ # force one chunk for process jobs
+ if self.type == netrender.model.JOB_PROCESS:
+ self.chunks = 1
- def start(self):
- self.status = JOB_QUEUED
-
- def addLog(self, frames):
- log_name = "_".join(("%04d" % f for f in frames)) + ".log"
- log_path = self.save_path + log_name
-
- for number in frames:
- frame = self[number]
- if frame:
- frame.log_path = log_path
-
- def addFrame(self, frame_number, command):
- frame = MRenderFrame(frame_number, command)
- self.frames.append(frame)
- return frame
-
- def reset(self, all):
- for f in self.frames:
- f.reset(all)
-
- def getFrames(self):
- frames = []
- for f in self.frames:
- if f.status == QUEUED:
- self.last_dispatched = time.time()
- frames.append(f)
- if len(frames) >= self.chunks:
- break
-
- return frames
+ # Force WAITING status on creation
+ self.status = JOB_WAITING
+
+ # special server properties
+ self.last_update = 0
+ self.save_path = ""
+ self.files = [MRenderFile(rfile.filepath, rfile.index, rfile.start, rfile.end) for rfile in job_info.files]
+
+ def save(self):
+ if self.save_path:
+ f = open(self.save_path + "job.txt", "w")
+ f.write(repr(self.serialize()))
+ f.close()
+
+ def edit(self, info_map):
+ if "status" in info_map:
+ self.status = info_map["status"]
+
+ if "priority" in info_map:
+ self.priority = info_map["priority"]
+
+ if "chunks" in info_map:
+ self.chunks = info_map["chunks"]
+
+ def testStart(self):
+ for f in self.files:
+ if not f.test():
+ return False
+
+ self.start()
+ return True
+
+ def testFinished(self):
+ for f in self.frames:
+ if f.status == QUEUED or f.status == DISPATCHED:
+ break
+ else:
+ self.status = JOB_FINISHED
+
+ def start(self):
+ self.status = JOB_QUEUED
+
+ def addLog(self, frames):
+ log_name = "_".join(("%04d" % f for f in frames)) + ".log"
+ log_path = self.save_path + log_name
+
+ for number in frames:
+ frame = self[number]
+ if frame:
+ frame.log_path = log_path
+
+ def addFrame(self, frame_number, command):
+ frame = MRenderFrame(frame_number, command)
+ self.frames.append(frame)
+ return frame
+
+ def reset(self, all):
+ for f in self.frames:
+ f.reset(all)
+
+ def getFrames(self):
+ frames = []
+ for f in self.frames:
+ if f.status == QUEUED:
+ self.last_dispatched = time.time()
+ frames.append(f)
+ if len(frames) >= self.chunks:
+ break
+
+ return frames
class MRenderFrame(netrender.model.RenderFrame):
- def __init__(self, frame, command):
- super().__init__()
- self.number = frame
- self.slave = None
- self.time = 0
- self.status = QUEUED
- self.command = command
-
- self.log_path = None
-
- def reset(self, all):
- if all or self.status == ERROR:
- self.log_path = None
- self.slave = None
- self.time = 0
- self.status = QUEUED
+ def __init__(self, frame, command):
+ super().__init__()
+ self.number = frame
+ self.slave = None
+ self.time = 0
+ self.status = QUEUED
+ self.command = command
+
+ self.log_path = None
+
+ def reset(self, all):
+ if all or self.status == ERROR:
+ self.log_path = None
+ self.slave = None
+ self.time = 0
+ self.status = QUEUED
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -169,692 +169,692 @@ cancel_pattern = re.compile("/cancel_([a-zA-Z0-9]+)")
edit_pattern = re.compile("/edit_([a-zA-Z0-9]+)")
class RenderHandler(http.server.BaseHTTPRequestHandler):
- def send_head(self, code = http.client.OK, headers = {}, content = "application/octet-stream"):
- self.send_response(code)
- self.send_header("Content-type", content)
-
- for key, value in headers.items():
- self.send_header(key, value)
-
- self.end_headers()
+ def send_head(self, code = http.client.OK, headers = {}, content = "application/octet-stream"):
+ self.send_response(code)
+ self.send_header("Content-type", content)
- def do_HEAD(self):
-
- if self.path == "/status":
- job_id = self.headers.get('job-id', "")
- job_frame = int(self.headers.get('job-frame', -1))
-
- job = self.server.getJobID(job_id)
- if job:
- frame = job[job_frame]
-
-
- if frame:
- self.send_head(http.client.OK)
- else:
- # no such frame
- self.send_head(http.client.NO_CONTENT)
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
-
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
- def do_GET(self):
-
- if self.path == "/version":
- self.send_head()
- self.server.stats("", "Version check")
- self.wfile.write(VERSION)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/render"):
- match = render_pattern.match(self.path)
-
- if match:
- job_id = match.groups()[0]
- frame_number = int(match.groups()[1])
-
- job = self.server.getJobID(job_id)
-
- if job:
- frame = job[frame_number]
-
- if frame:
- if frame.status in (QUEUED, DISPATCHED):
- self.send_head(http.client.ACCEPTED)
- elif frame.status == DONE:
- self.server.stats("", "Sending result to client")
- f = open(job.save_path + "%04d" % frame_number + ".exr", 'rb')
-
- self.send_head()
-
- shutil.copyfileobj(f, self.wfile)
-
- f.close()
- elif frame.status == ERROR:
- self.send_head(http.client.PARTIAL_CONTENT)
- else:
- # no such frame
- self.send_head(http.client.NO_CONTENT)
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else:
- # invalid url
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/log"):
- match = log_pattern.match(self.path)
-
- if match:
- job_id = match.groups()[0]
- frame_number = int(match.groups()[1])
-
- job = self.server.getJobID(job_id)
-
- if job:
- frame = job[frame_number]
-
- if frame:
- if not frame.log_path or frame.status in (QUEUED, DISPATCHED):
- self.send_head(http.client.PROCESSING)
- else:
- self.server.stats("", "Sending log to client")
- f = open(frame.log_path, 'rb')
-
- self.send_head(content = "text/plain")
-
- shutil.copyfileobj(f, self.wfile)
-
- f.close()
- else:
- # no such frame
- self.send_head(http.client.NO_CONTENT)
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else:
- # invalid URL
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/status":
- job_id = self.headers.get('job-id', "")
- job_frame = int(self.headers.get('job-frame', -1))
-
- if job_id:
-
- job = self.server.getJobID(job_id)
- if job:
- if job_frame != -1:
- frame = job[frame]
-
- if frame:
- message = frame.serialize()
- else:
- # no such frame
- self.send_heat(http.client.NO_CONTENT)
- return
- else:
- message = job.serialize()
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- return
- else: # status of all jobs
- message = []
-
- for job in self.server:
- message.append(job.serialize())
-
-
- self.server.stats("", "Sending status")
- self.send_head()
- self.wfile.write(bytes(repr(message), encoding='utf8'))
-
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/job":
- self.server.balance()
-
- slave_id = self.headers['slave-id']
-
- slave = self.server.getSeenSlave(slave_id)
-
- if slave: # only if slave id is valid
- job, frames = self.server.newDispatch(slave_id)
-
- if job and frames:
- for f in frames:
- print("dispatch", f.number)
- f.status = DISPATCHED
- f.slave = slave
-
- slave.job = job
- slave.job_frames = [f.number for f in frames]
-
- self.send_head(headers={"job-id": job.id})
-
- message = job.serialize(frames)
-
- self.wfile.write(bytes(repr(message), encoding='utf8'))
-
- self.server.stats("", "Sending job to slave")
- else:
- # no job available, return error code
- slave.job = None
- slave.job_frames = []
-
- self.send_head(http.client.ACCEPTED)
- else: # invalid slave id
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/file"):
- match = file_pattern.match(self.path)
-
- if match:
- slave_id = self.headers['slave-id']
- slave = self.server.getSeenSlave(slave_id)
-
- if not slave:
- # invalid slave id
- print("invalid slave id")
+ for key, value in headers.items():
+ self.send_header(key, value)
- job_id = match.groups()[0]
- file_index = int(match.groups()[1])
-
- job = self.server.getJobID(job_id)
-
- if job:
- render_file = job.files[file_index]
-
- if render_file:
- self.server.stats("", "Sending file to slave")
- f = open(render_file.filepath, 'rb')
-
- self.send_head()
- shutil.copyfileobj(f, self.wfile)
-
- f.close()
- else:
- # no such file
- self.send_head(http.client.NO_CONTENT)
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else: # invalid url
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/slaves":
- message = []
-
- self.server.stats("", "Sending slaves status")
-
- for slave in self.server.slaves:
- message.append(slave.serialize())
-
- self.send_head()
-
- self.wfile.write(bytes(repr(message), encoding='utf8'))
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- else:
- # hand over the rest to the html section
- netrender.master_html.get(self)
+ self.end_headers()
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- def do_POST(self):
-
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- if self.path == "/job":
-
- length = int(self.headers['content-length'])
-
- job_info = netrender.model.RenderJob.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
-
- job_id = self.server.nextJobID()
-
- job = MRenderJob(job_id, job_info)
-
- for frame in job_info.frames:
- frame = job.addFrame(frame.number, frame.command)
-
- self.server.addJob(job)
-
- headers={"job-id": job_id}
-
- if job.testStart():
- self.server.stats("", "New job, started")
- self.send_head(headers=headers)
- else:
- self.server.stats("", "New job, missing files (%i total)" % len(job.files))
- self.send_head(http.client.ACCEPTED, headers=headers)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/edit"):
- match = edit_pattern.match(self.path)
+ def do_HEAD(self):
- if match:
- job_id = match.groups()[0]
-
- job = self.server.getJobID(job_id)
+ if self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
- if job:
- length = int(self.headers['content-length'])
- info_map = eval(str(self.rfile.read(length), encoding='utf8'))
-
- job.edit(info_map)
- self.send_head()
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else:
- # invalid url
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/cancel"):
- match = cancel_pattern.match(self.path)
+ job = self.server.getJobID(job_id)
+ if job:
+ frame = job[job_frame]
- if match:
- job_id = match.groups()[0]
-
- job = self.server.getJobID(job_id)
-
- if job:
- self.server.stats("", "Cancelling job")
- self.server.removeJob(job)
- self.send_head()
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else:
- # invalid url
- self.send_head(http.client.NO_CONTENT)
-
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/clear":
- # cancel all jobs
- self.server.stats("", "Clearing jobs")
- self.server.clear()
-
- self.send_head()
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/reset"):
- match = reset_pattern.match(self.path)
-
- if match:
- all = match.groups()[0] == 'all'
- job_id = match.groups()[1]
- job_frame = int(match.groups()[2])
- job = self.server.getJobID(job_id)
-
- if job:
- if job_frame != 0:
-
- frame = job[job_frame]
- if frame:
- self.server.stats("", "Reset job frame")
- frame.reset(all)
- self.send_head()
- else:
- # no such frame
- self.send_head(http.client.NO_CONTENT)
-
- else:
- self.server.stats("", "Reset job")
- job.reset(all)
- self.send_head()
-
- else: # job not found
- self.send_head(http.client.NO_CONTENT)
- else: # invalid url
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/slave":
- length = int(self.headers['content-length'])
- job_frame_string = self.headers['job-frame']
-
- self.server.stats("", "New slave connected")
-
- slave_info = netrender.model.RenderSlave.materialize(eval(str(self.rfile.read(length), encoding='utf8')), cache = False)
-
- slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats)
-
- self.send_head(headers = {"slave-id": slave_id})
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/log":
- length = int(self.headers['content-length'])
-
- log_info = netrender.model.LogFile.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
-
- slave_id = log_info.slave_id
-
- slave = self.server.getSeenSlave(slave_id)
-
- if slave: # only if slave id is valid
- job = self.server.getJobID(log_info.job_id)
-
- if job:
- self.server.stats("", "Log announcement")
- job.addLog(log_info.frames)
- self.send_head(http.client.OK)
- else:
- # no such job id
- self.send_head(http.client.NO_CONTENT)
- else: # invalid slave id
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- def do_PUT(self):
-
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- if self.path.startswith("/file"):
- match = file_pattern.match(self.path)
-
- if match:
- self.server.stats("", "Receiving job")
+ if frame:
+ self.send_head(http.client.OK)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
- length = int(self.headers['content-length'])
- job_id = match.groups()[0]
- file_index = int(match.groups()[1])
-
- job = self.server.getJobID(job_id)
-
- if job:
-
- render_file = job.files[file_index]
-
- if render_file:
- main_file = job.files[0].filepath # filename of the first file
-
- main_path, main_name = os.path.split(main_file)
-
- if file_index > 0:
- file_path = prefixPath(job.save_path, render_file.filepath, main_path)
- else:
- file_path = job.save_path + main_name
-
- buf = self.rfile.read(length)
-
- # add same temp file + renames as slave
-
- f = open(file_path, "wb")
- f.write(buf)
- f.close()
- del buf
-
- render_file.filepath = file_path # set the new path
-
- if job.testStart():
- self.server.stats("", "File upload, starting job")
- self.send_head(http.client.OK)
- else:
- self.server.stats("", "File upload, file missings")
- self.send_head(http.client.ACCEPTED)
- else: # invalid file
- print("file not found", job_id, file_index)
- self.send_head(http.client.NO_CONTENT)
- else: # job not found
- print("job not found", job_id, file_index)
- self.send_head(http.client.NO_CONTENT)
- else: # invalid url
- print("no match")
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path == "/render":
- self.server.stats("", "Receiving render result")
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- # need some message content here or the slave doesn't like it
- self.wfile.write(bytes("foo", encoding='utf8'))
-
- slave_id = self.headers['slave-id']
-
- slave = self.server.getSeenSlave(slave_id)
-
- if slave: # only if slave id is valid
- job_id = self.headers['job-id']
-
- job = self.server.getJobID(job_id)
-
- if job:
- job_frame = int(self.headers['job-frame'])
- job_result = int(self.headers['job-result'])
- job_time = float(self.headers['job-time'])
-
- frame = job[job_frame]
-
- if frame:
- if job.type == netrender.model.JOB_BLENDER:
- if job_result == DONE:
- length = int(self.headers['content-length'])
- buf = self.rfile.read(length)
- f = open(job.save_path + "%04d" % job_frame + ".exr", 'wb')
- f.write(buf)
- f.close()
-
- del buf
- elif job_result == ERROR:
- # blacklist slave on this job on error
- # slaves might already be in blacklist if errors on the whole chunk
- if not slave.id in job.blacklist:
- job.blacklist.append(slave.id)
-
- self.server.stats("", "Receiving result")
-
- slave.finishedFrame(job_frame)
-
- frame.status = job_result
- frame.time = job_time
-
- job.testFinished()
-
- self.send_head()
- else: # frame not found
- self.send_head(http.client.NO_CONTENT)
- else: # job not found
- self.send_head(http.client.NO_CONTENT)
- else: # invalid slave id
- self.send_head(http.client.NO_CONTENT)
- # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
- elif self.path.startswith("/log"):
- self.server.stats("", "Receiving log file")
+ def do_GET(self):
- match = log_pattern.match(self.path)
-
- if match:
- job_id = match.groups()[0]
-
- job = self.server.getJobID(job_id)
-
- if job:
- job_frame = int(match.groups()[1])
-
- frame = job[job_frame]
-
- if frame and frame.log_path:
- length = int(self.headers['content-length'])
- buf = self.rfile.read(length)
- f = open(frame.log_path, 'ab')
- f.write(buf)
- f.close()
-
- del buf
-
- self.server.getSeenSlave(self.headers['slave-id'])
-
- self.send_head()
- else: # frame not found
- self.send_head(http.client.NO_CONTENT)
- else: # job not found
- self.send_head(http.client.NO_CONTENT)
- else: # invalid url
- self.send_head(http.client.NO_CONTENT)
+ if self.path == "/version":
+ self.send_head()
+ self.server.stats("", "Version check")
+ self.wfile.write(VERSION)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/render"):
+ match = render_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ if frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.ACCEPTED)
+ elif frame.status == DONE:
+ self.server.stats("", "Sending result to client")
+ f = open(job.save_path + "%04d" % frame_number + ".exr", 'rb')
+
+ self.send_head()
+
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ elif frame.status == ERROR:
+ self.send_head(http.client.PARTIAL_CONTENT)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/log"):
+ match = log_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ if not frame.log_path or frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.PROCESSING)
+ else:
+ self.server.stats("", "Sending log to client")
+ f = open(frame.log_path, 'rb')
+
+ self.send_head(content = "text/plain")
+
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid URL
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
+
+ if job_id:
+
+ job = self.server.getJobID(job_id)
+ if job:
+ if job_frame != -1:
+                        frame = job[job_frame]
+
+ if frame:
+ message = frame.serialize()
+ else:
+ # no such frame
+                            self.send_head(http.client.NO_CONTENT)
+ return
+ else:
+ message = job.serialize()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ return
+ else: # status of all jobs
+ message = []
+
+ for job in self.server:
+ message.append(job.serialize())
+
+
+ self.server.stats("", "Sending status")
+ self.send_head()
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/job":
+ self.server.balance()
+
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job, frames = self.server.newDispatch(slave_id)
+
+ if job and frames:
+ for f in frames:
+ print("dispatch", f.number)
+ f.status = DISPATCHED
+ f.slave = slave
+
+ slave.job = job
+ slave.job_frames = [f.number for f in frames]
+
+ self.send_head(headers={"job-id": job.id})
+
+ message = job.serialize(frames)
+
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+
+ self.server.stats("", "Sending job to slave")
+ else:
+ # no job available, return error code
+ slave.job = None
+ slave.job_frames = []
+
+ self.send_head(http.client.ACCEPTED)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/file"):
+ match = file_pattern.match(self.path)
+
+ if match:
+ slave_id = self.headers['slave-id']
+ slave = self.server.getSeenSlave(slave_id)
+
+ if not slave:
+ # invalid slave id
+ print("invalid slave id")
+
+ job_id = match.groups()[0]
+ file_index = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ render_file = job.files[file_index]
+
+ if render_file:
+ self.server.stats("", "Sending file to slave")
+ f = open(render_file.filepath, 'rb')
+
+ self.send_head()
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such file
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/slaves":
+ message = []
+
+ self.server.stats("", "Sending slaves status")
+
+ for slave in self.server.slaves:
+ message.append(slave.serialize())
+
+ self.send_head()
+
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ else:
+ # hand over the rest to the html section
+ netrender.master_html.get(self)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ def do_POST(self):
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ if self.path == "/job":
+
+ length = int(self.headers['content-length'])
+
+ job_info = netrender.model.RenderJob.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
+
+ job_id = self.server.nextJobID()
+
+ job = MRenderJob(job_id, job_info)
+
+ for frame in job_info.frames:
+ frame = job.addFrame(frame.number, frame.command)
+
+ self.server.addJob(job)
+
+ headers={"job-id": job_id}
+
+ if job.testStart():
+ self.server.stats("", "New job, started")
+ self.send_head(headers=headers)
+ else:
+ self.server.stats("", "New job, missing files (%i total)" % len(job.files))
+ self.send_head(http.client.ACCEPTED, headers=headers)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/edit"):
+ match = edit_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ length = int(self.headers['content-length'])
+ info_map = eval(str(self.rfile.read(length), encoding='utf8'))
+
+ job.edit(info_map)
+ self.send_head()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/cancel"):
+ match = cancel_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ self.server.stats("", "Cancelling job")
+ self.server.removeJob(job)
+ self.send_head()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid url
+ self.send_head(http.client.NO_CONTENT)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/clear":
+ # cancel all jobs
+ self.server.stats("", "Clearing jobs")
+ self.server.clear()
+
+ self.send_head()
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/reset"):
+ match = reset_pattern.match(self.path)
+
+ if match:
+ all = match.groups()[0] == 'all'
+ job_id = match.groups()[1]
+ job_frame = int(match.groups()[2])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ if job_frame != 0:
+
+ frame = job[job_frame]
+ if frame:
+ self.server.stats("", "Reset job frame")
+ frame.reset(all)
+ self.send_head()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+
+ else:
+ self.server.stats("", "Reset job")
+ job.reset(all)
+ self.send_head()
+
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/slave":
+ length = int(self.headers['content-length'])
+ job_frame_string = self.headers['job-frame']
+
+ self.server.stats("", "New slave connected")
+
+ slave_info = netrender.model.RenderSlave.materialize(eval(str(self.rfile.read(length), encoding='utf8')), cache = False)
+
+ slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats)
+
+ self.send_head(headers = {"slave-id": slave_id})
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/log":
+ length = int(self.headers['content-length'])
+
+ log_info = netrender.model.LogFile.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
+
+ slave_id = log_info.slave_id
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job = self.server.getJobID(log_info.job_id)
+
+ if job:
+ self.server.stats("", "Log announcement")
+ job.addLog(log_info.frames)
+ self.send_head(http.client.OK)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ def do_PUT(self):
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ if self.path.startswith("/file"):
+ match = file_pattern.match(self.path)
+
+ if match:
+ self.server.stats("", "Receiving job")
+
+ length = int(self.headers['content-length'])
+ job_id = match.groups()[0]
+ file_index = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+
+ render_file = job.files[file_index]
+
+ if render_file:
+ main_file = job.files[0].filepath # filename of the first file
+
+ main_path, main_name = os.path.split(main_file)
+
+ if file_index > 0:
+ file_path = prefixPath(job.save_path, render_file.filepath, main_path)
+ else:
+ file_path = job.save_path + main_name
+
+ buf = self.rfile.read(length)
+
+ # add same temp file + renames as slave
+
+ f = open(file_path, "wb")
+ f.write(buf)
+ f.close()
+ del buf
+
+ render_file.filepath = file_path # set the new path
+
+ if job.testStart():
+ self.server.stats("", "File upload, starting job")
+ self.send_head(http.client.OK)
+ else:
+ self.server.stats("", "File upload, file missings")
+ self.send_head(http.client.ACCEPTED)
+ else: # invalid file
+ print("file not found", job_id, file_index)
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ print("job not found", job_id, file_index)
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid url
+ print("no match")
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/render":
+ self.server.stats("", "Receiving render result")
+
+ # need some message content here or the slave doesn't like it
+ self.wfile.write(bytes("foo", encoding='utf8'))
+
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job_id = self.headers['job-id']
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ job_frame = int(self.headers['job-frame'])
+ job_result = int(self.headers['job-result'])
+ job_time = float(self.headers['job-time'])
+
+ frame = job[job_frame]
+
+ if frame:
+ if job.type == netrender.model.JOB_BLENDER:
+ if job_result == DONE:
+ length = int(self.headers['content-length'])
+ buf = self.rfile.read(length)
+ f = open(job.save_path + "%04d" % job_frame + ".exr", 'wb')
+ f.write(buf)
+ f.close()
+
+ del buf
+ elif job_result == ERROR:
+ # blacklist slave on this job on error
+ # slaves might already be in blacklist if errors on the whole chunk
+ if not slave.id in job.blacklist:
+ job.blacklist.append(slave.id)
+
+ self.server.stats("", "Receiving result")
+
+ slave.finishedFrame(job_frame)
+
+ frame.status = job_result
+ frame.time = job_time
+
+ job.testFinished()
+
+ self.send_head()
+ else: # frame not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/log"):
+ self.server.stats("", "Receiving log file")
+
+ match = log_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ job_frame = int(match.groups()[1])
+
+ frame = job[job_frame]
+
+ if frame and frame.log_path:
+ length = int(self.headers['content-length'])
+ buf = self.rfile.read(length)
+ f = open(frame.log_path, 'ab')
+ f.write(buf)
+ f.close()
+
+ del buf
+
+ self.server.getSeenSlave(self.headers['slave-id'])
+
+ self.send_head()
+ else: # frame not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid url
+ self.send_head(http.client.NO_CONTENT)
class RenderMasterServer(http.server.HTTPServer):
- def __init__(self, address, handler_class, path):
- super().__init__(address, handler_class)
- self.jobs = []
- self.jobs_map = {}
- self.slaves = []
- self.slaves_map = {}
- self.job_id = 0
- self.path = path + "master_" + str(os.getpid()) + os.sep
-
- self.slave_timeout = 30 # 30 mins: need a parameter for that
-
- self.balancer = netrender.balancing.Balancer()
- self.balancer.addRule(netrender.balancing.RatingUsageByCategory(self.getJobs))
- self.balancer.addRule(netrender.balancing.RatingUsage())
- self.balancer.addException(netrender.balancing.ExcludeQueuedEmptyJob())
- self.balancer.addException(netrender.balancing.ExcludeSlavesLimit(self.countJobs, self.countSlaves, limit = 0.9))
- self.balancer.addPriority(netrender.balancing.NewJobPriority())
- self.balancer.addPriority(netrender.balancing.MinimumTimeBetweenDispatchPriority(limit = 2))
-
- if not os.path.exists(self.path):
- os.mkdir(self.path)
-
- def nextJobID(self):
- self.job_id += 1
- return str(self.job_id)
-
- def addSlave(self, name, address, stats):
- slave = MRenderSlave(name, address, stats)
- self.slaves.append(slave)
- self.slaves_map[slave.id] = slave
-
- return slave.id
-
- def removeSlave(self, slave):
- self.slaves.remove(slave)
- self.slaves_map.pop(slave.id)
-
- def getSlave(self, slave_id):
- return self.slaves_map.get(slave_id, None)
-
- def getSeenSlave(self, slave_id):
- slave = self.getSlave(slave_id)
- if slave:
- slave.seen()
-
- return slave
-
- def timeoutSlaves(self):
- removed = []
-
- t = time.time()
-
- for slave in self.slaves:
- if (t - slave.last_seen) / 60 > self.slave_timeout:
- removed.append(slave)
-
- if slave.job:
- for f in slave.job_frames:
- slave.job[f].status = ERROR
-
- for slave in removed:
- self.removeSlave(slave)
-
- def updateUsage(self):
- blend = 0.5
- for job in self.jobs:
- job.usage *= (1 - blend)
-
- if self.slaves:
- slave_usage = blend / self.countSlaves()
-
- for slave in self.slaves:
- if slave.job:
- slave.job.usage += slave_usage
-
-
- def clear(self):
- removed = self.jobs[:]
-
- for job in removed:
- self.removeJob(job)
-
- def balance(self):
- self.balancer.balance(self.jobs)
-
- def getJobs(self):
- return self.jobs
-
- def countJobs(self, status = JOB_QUEUED):
- total = 0
- for j in self.jobs:
- if j.status == status:
- total += 1
-
- return total
-
- def countSlaves(self):
- return len(self.slaves)
-
- def removeJob(self, job):
- self.jobs.remove(job)
- self.jobs_map.pop(job.id)
-
- for slave in self.slaves:
- if slave.job == job:
- slave.job = None
- slave.job_frames = []
-
- def addJob(self, job):
- self.jobs.append(job)
- self.jobs_map[job.id] = job
-
- # create job directory
- job.save_path = self.path + "job_" + job.id + os.sep
- if not os.path.exists(job.save_path):
- os.mkdir(job.save_path)
-
- job.save()
-
- def getJobID(self, id):
- return self.jobs_map.get(id, None)
-
- def __iter__(self):
- for job in self.jobs:
- yield job
-
- def newDispatch(self, slave_id):
- if self.jobs:
- for job in self.jobs:
- if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
- return job, job.getFrames()
-
- return None, None
+ def __init__(self, address, handler_class, path):
+ super().__init__(address, handler_class)
+ self.jobs = []
+ self.jobs_map = {}
+ self.slaves = []
+ self.slaves_map = {}
+ self.job_id = 0
+ self.path = path + "master_" + str(os.getpid()) + os.sep
+
+ self.slave_timeout = 30 # 30 mins: need a parameter for that
+
+ self.balancer = netrender.balancing.Balancer()
+ self.balancer.addRule(netrender.balancing.RatingUsageByCategory(self.getJobs))
+ self.balancer.addRule(netrender.balancing.RatingUsage())
+ self.balancer.addException(netrender.balancing.ExcludeQueuedEmptyJob())
+ self.balancer.addException(netrender.balancing.ExcludeSlavesLimit(self.countJobs, self.countSlaves, limit = 0.9))
+ self.balancer.addPriority(netrender.balancing.NewJobPriority())
+ self.balancer.addPriority(netrender.balancing.MinimumTimeBetweenDispatchPriority(limit = 2))
+
+ if not os.path.exists(self.path):
+ os.mkdir(self.path)
+
+ def nextJobID(self):
+ self.job_id += 1
+ return str(self.job_id)
+
+ def addSlave(self, name, address, stats):
+ slave = MRenderSlave(name, address, stats)
+ self.slaves.append(slave)
+ self.slaves_map[slave.id] = slave
+
+ return slave.id
+
+ def removeSlave(self, slave):
+ self.slaves.remove(slave)
+ self.slaves_map.pop(slave.id)
+
+ def getSlave(self, slave_id):
+ return self.slaves_map.get(slave_id, None)
+
+ def getSeenSlave(self, slave_id):
+ slave = self.getSlave(slave_id)
+ if slave:
+ slave.seen()
+
+ return slave
+
+ def timeoutSlaves(self):
+ removed = []
+
+ t = time.time()
+
+ for slave in self.slaves:
+ if (t - slave.last_seen) / 60 > self.slave_timeout:
+ removed.append(slave)
+
+ if slave.job:
+ for f in slave.job_frames:
+ slave.job[f].status = ERROR
+
+ for slave in removed:
+ self.removeSlave(slave)
+
+ def updateUsage(self):
+ blend = 0.5
+ for job in self.jobs:
+ job.usage *= (1 - blend)
+
+ if self.slaves:
+ slave_usage = blend / self.countSlaves()
+
+ for slave in self.slaves:
+ if slave.job:
+ slave.job.usage += slave_usage
+
+
+ def clear(self):
+ removed = self.jobs[:]
+
+ for job in removed:
+ self.removeJob(job)
+
+ def balance(self):
+ self.balancer.balance(self.jobs)
+
+ def getJobs(self):
+ return self.jobs
+
+ def countJobs(self, status = JOB_QUEUED):
+ total = 0
+ for j in self.jobs:
+ if j.status == status:
+ total += 1
+
+ return total
+
+ def countSlaves(self):
+ return len(self.slaves)
+
+ def removeJob(self, job):
+ self.jobs.remove(job)
+ self.jobs_map.pop(job.id)
+
+ for slave in self.slaves:
+ if slave.job == job:
+ slave.job = None
+ slave.job_frames = []
+
+ def addJob(self, job):
+ self.jobs.append(job)
+ self.jobs_map[job.id] = job
+
+ # create job directory
+ job.save_path = self.path + "job_" + job.id + os.sep
+ if not os.path.exists(job.save_path):
+ os.mkdir(job.save_path)
+
+ job.save()
+
+ def getJobID(self, id):
+ return self.jobs_map.get(id, None)
+
+ def __iter__(self):
+ for job in self.jobs:
+ yield job
+
+ def newDispatch(self, slave_id):
+ if self.jobs:
+ for job in self.jobs:
+ if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
+ return job, job.getFrames()
+
+ return None, None
def runMaster(address, broadcast, path, update_stats, test_break):
- httpd = RenderMasterServer(address, RenderHandler, path)
- httpd.timeout = 1
- httpd.stats = update_stats
-
- if broadcast:
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ httpd = RenderMasterServer(address, RenderHandler, path)
+ httpd.timeout = 1
+ httpd.stats = update_stats
- start_time = time.time()
-
- while not test_break():
- httpd.handle_request()
-
- if time.time() - start_time >= 10: # need constant here
- httpd.timeoutSlaves()
-
- httpd.updateUsage()
-
- if broadcast:
- print("broadcasting address")
- s.sendto(bytes("%i" % address[1], encoding='utf8'), 0, ('', 8000))
- start_time = time.time()
-
- httpd.server_close()
+ if broadcast:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ start_time = time.time()
+
+ while not test_break():
+ httpd.handle_request()
+
+ if time.time() - start_time >= 10: # need constant here
+ httpd.timeoutSlaves()
+
+ httpd.updateUsage()
+
+ if broadcast:
+ print("broadcasting address")
+ s.sendto(bytes("%i" % address[1], encoding='utf8'), 0, ('', 8000))
+ start_time = time.time()
+
+ httpd.server_close()
diff --git a/release/scripts/io/netrender/master_html.py b/release/scripts/io/netrender/master_html.py
index 1f530efa7c6..af4db43ae9d 100644
--- a/release/scripts/io/netrender/master_html.py
+++ b/release/scripts/io/netrender/master_html.py
@@ -4,12 +4,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -24,226 +24,226 @@ from netrender.utils import *
src_folder = os.path.split(__file__)[0]
def get(handler):
- def output(text):
- handler.wfile.write(bytes(text, encoding='utf8'))
-
- def head(title):
- output("")
- output("")
+ def output(text):
+ handler.wfile.write(bytes(text, encoding='utf8'))
+
+ def head(title):
+ output("")
+ output("")
# output("")
- output("")
- output(title)
- output("")
- output("")
+ output("")
+ output(title)
+ output("")
+ output("")
-
- def link(text, url):
- return "%s" % (url, text)
-
- def startTable(border=1, class_style = None, caption = None):
- output("")
-
- if caption:
- output("%s" % caption)
-
- def headerTable(*headers):
- output("")
-
- for c in headers:
- output("| " + c + " | ")
-
- output("
")
-
- def rowTable(*data, id = None, class_style = None, extra = None):
- output("%s" % (url, text)
- output(">")
-
- for c in data:
- output("| " + str(c) + " | ")
-
- output("
")
-
- def endTable():
- output("
")
-
- if handler.path == "/html/netrender.js":
- f = open(os.path.join(src_folder, "netrender.js"), 'rb')
-
- handler.send_head(content = "text/javascript")
- shutil.copyfileobj(f, handler.wfile)
-
- f.close()
- elif handler.path == "/html/netrender.css":
- f = open(os.path.join(src_folder, "netrender.css"), 'rb')
-
- handler.send_head(content = "text/css")
- shutil.copyfileobj(f, handler.wfile)
-
- f.close()
- elif handler.path == "/html" or handler.path == "/":
- handler.send_head(content = "text/html")
- head("NetRender")
-
- output("Master
")
-
- output("""""")
+ def startTable(border=1, class_style = None, caption = None):
+ output("")
- for rule in handler.server.balancer.rules:
- rowTable("rating", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
+ if caption:
+ output("%s" % caption)
- for rule in handler.server.balancer.priorities:
- rowTable("priority", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
-
- for rule in handler.server.balancer.exceptions:
- rowTable("exception", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
+ def headerTable(*headers):
+ output("")
- endTable()
+ for c in headers:
+ output("| " + c + " | ")
- output("Slaves
")
-
- startTable()
- headerTable("name", "address", "last seen", "stats", "job")
-
- for slave in handler.server.slaves:
- rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
-
- endTable()
-
- output("Jobs
")
-
- startTable()
- headerTable(
- " ",
- "id",
- "name",
- "category",
- "chunks",
- "priority",
- "usage",
- "wait",
- "status",
- "length",
- "done",
- "dispatched",
- "error",
- "first",
- "exception"
- )
+ output("
")
- handler.server.balance()
-
- for job in handler.server.jobs:
- results = job.framesStatus()
- rowTable(
- """""" % job.id +
- """""" % job.id,
- job.id,
- link(job.name, "/html/job" + job.id),
- job.category if job.category else "None",
- str(job.chunks) +
- """""" % (job.id, job.chunks + 1) +
- """""" % (job.id, job.chunks - 1, "disabled=True" if job.chunks == 1 else ""),
- str(job.priority) +
- """""" % (job.id, job.priority + 1) +
- """""" % (job.id, job.priority - 1, "disabled=True" if job.priority == 1 else ""),
- "%0.1f%%" % (job.usage * 100),
- "%is" % int(time.time() - job.last_dispatched),
- job.statusText(),
- len(job),
- results[DONE],
- results[DISPATCHED],
- str(results[ERROR]) +
- """""" % (job.id, "disabled=True" if not results[ERROR] else ""),
- handler.server.balancer.applyPriorities(job), handler.server.balancer.applyExceptions(job)
- )
-
- endTable()
-
- output("")
-
- elif handler.path.startswith("/html/job"):
- handler.send_head(content = "text/html")
- job_id = handler.path[9:]
-
- head("NetRender")
-
- job = handler.server.getJobID(job_id)
-
- if job:
- output("Files
")
-
- startTable()
- headerTable("path")
-
- tot_cache = 0
- tot_fluid = 0
-
- for file in job.files:
- if file.filepath.endswith(".bphys"):
- tot_cache += 1
- elif file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
- tot_fluid += 1
- else:
- rowTable(file.filepath)
+ def rowTable(*data, id = None, class_style = None, extra = None):
+ output(" 0:
- rowTable("%i physic cache files" % tot_cache, class_style = "toggle", extra = "onclick='toggleDisplay(".cache", "none", "table-row")'")
- for file in job.files:
- if file.filepath.endswith(".bphys"):
- rowTable(os.path.split(file.filepath)[1], class_style = "cache")
-
- if tot_fluid > 0:
- rowTable("%i fluid bake files" % tot_fluid, class_style = "toggle", extra = "onclick='toggleDisplay(".fluid", "none", "table-row")'")
- for file in job.files:
- if file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
- rowTable(os.path.split(file.filepath)[1], class_style = "fluid")
+ if id:
+ output(" id='%s'" % id)
- endTable()
-
- output("Blacklist
")
-
- if job.blacklist:
- startTable()
- headerTable("name", "address")
-
- for slave_id in job.blacklist:
- slave = handler.server.slaves_map[slave_id]
- rowTable(slave.name, slave.address[0])
-
- endTable()
- else:
- output("Empty")
+ if class_style:
+ output(" class='%s'" % class_style)
- output("Frames
")
-
- startTable()
- headerTable("no", "status", "render time", "slave", "log", "result")
-
- for frame in job.frames:
- rowTable(frame.number, frame.statusText(), "%.1fs" % frame.time, frame.slave.name if frame.slave else " ", link("view log", logURL(job_id, frame.number)) if frame.log_path else " ", link("view result", renderURL(job_id, frame.number)) if frame.status == DONE else " ")
-
- endTable()
- else:
- output("no such job")
-
- output("")
+ if extra:
+ output(" %s" % extra)
+
+ output(">")
+
+ for c in data:
+ output("" + str(c) + " | ")
+
+ output("
")
+
+ def endTable():
+ output("
")
+
+ if handler.path == "/html/netrender.js":
+ f = open(os.path.join(src_folder, "netrender.js"), 'rb')
+
+ handler.send_head(content = "text/javascript")
+ shutil.copyfileobj(f, handler.wfile)
+
+ f.close()
+ elif handler.path == "/html/netrender.css":
+ f = open(os.path.join(src_folder, "netrender.css"), 'rb')
+
+ handler.send_head(content = "text/css")
+ shutil.copyfileobj(f, handler.wfile)
+
+ f.close()
+ elif handler.path == "/html" or handler.path == "/":
+ handler.send_head(content = "text/html")
+ head("NetRender")
+
+ output("Master
")
+
+ output("""""")
+
+ startTable(caption = "Rules", class_style = "rules")
+
+ headerTable("type", "description", "limit")
+
+ for rule in handler.server.balancer.rules:
+ rowTable("rating", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
+
+ for rule in handler.server.balancer.priorities:
+ rowTable("priority", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
+
+ for rule in handler.server.balancer.exceptions:
+ rowTable("exception", rule, rule.str_limit() if hasattr(rule, "limit") else " ")
+
+ endTable()
+
+ output("Slaves
")
+
+ startTable()
+ headerTable("name", "address", "last seen", "stats", "job")
+
+ for slave in handler.server.slaves:
+ rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
+
+ endTable()
+
+ output("Jobs
")
+
+ startTable()
+ headerTable(
+ " ",
+ "id",
+ "name",
+ "category",
+ "chunks",
+ "priority",
+ "usage",
+ "wait",
+ "status",
+ "length",
+ "done",
+ "dispatched",
+ "error",
+ "first",
+ "exception"
+ )
+
+ handler.server.balance()
+
+ for job in handler.server.jobs:
+ results = job.framesStatus()
+ rowTable(
+ """""" % job.id +
+ """""" % job.id,
+ job.id,
+ link(job.name, "/html/job" + job.id),
+ job.category if job.category else "None",
+ str(job.chunks) +
+ """""" % (job.id, job.chunks + 1) +
+ """""" % (job.id, job.chunks - 1, "disabled=True" if job.chunks == 1 else ""),
+ str(job.priority) +
+ """""" % (job.id, job.priority + 1) +
+ """""" % (job.id, job.priority - 1, "disabled=True" if job.priority == 1 else ""),
+ "%0.1f%%" % (job.usage * 100),
+ "%is" % int(time.time() - job.last_dispatched),
+ job.statusText(),
+ len(job),
+ results[DONE],
+ results[DISPATCHED],
+ str(results[ERROR]) +
+ """""" % (job.id, "disabled=True" if not results[ERROR] else ""),
+ handler.server.balancer.applyPriorities(job), handler.server.balancer.applyExceptions(job)
+ )
+
+ endTable()
+
+ output("