Subreport: com.simiacryptus.mindseye.art.examples.TextureTiledRotorEC2%24_a23edab6-b65e-4575-9c3e-65d5e99e2143_log_com.simiacryptus.ref.lang.ReferenceCountingBase
Code from TextureTiledRotor.scala:73 executed in 0.00 seconds (0.000 gc):
() => {
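  // Make the notebook output implicitly available to the helpers used below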
  implicit val _ = log
  // First, basic configuration so we publish to our S3 site
  log.setArchiveHome(URI.create(s"s3://$s3bucket/$className/${log.getId}/"))
  log.onComplete(() => upload(log): Unit)
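  // Load the style image, scaled to the working resolution, and embed it in the report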
  log.p(log.jpg(ImageArtUtil.load(log, styleUrl, (maxResolution * Math.sqrt(magnification)).toInt), "Input Style"))
  val canvas = new RefAtomicReference[Tensor](null)
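  // The working canvas lives in a reference-counted atomic handle; rotatedCanvas applies the
  // kaleidoscope symmetry view to it and returns null until the canvas has been initialized.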
  def rotatedCanvas = {
    val input = canvas.get()
    if (null == input) input else {
      val viewLayer = getKaleidoscope(input.getDimensions)
      val result = viewLayer.eval(input)
      viewLayer.freeRef()
      val data = result.getData
      result.freeRef()
      val tensor = data.get(0)
      data.freeRef()
      tensor
    }
  }
  // Assembles a pre-tiled (rowsAndCols x rowsAndCols, e.g. 3x3) grid of the canvas for display
  def tiledCanvas = {
    val input = rotatedCanvas
    if (null == input) input else {
      val layer = new ImgTileAssemblyLayer(rowsAndCols, rowsAndCols)
      val result = layer.eval((1 to (rowsAndCols * rowsAndCols)).map(_ => input.addRef()): _*)
      layer.freeRef()
      input.freeRef()
      val data = result.getData
      result.freeRef()
      val tensor = data.get(0)
      data.freeRef()
      tensor
    }
  }
  // Kaleidoscope+Tiling layer used by the optimization engine.
  // Expands the canvas by a small amount, using tile wrap to draw in the expanded boundary.
  def viewLayer(dims: Seq[Int]) = {
    val padding = Math.min(256, Math.max(16, dims(0) / 2))
    val viewLayer = getKaleidoscope(dims.toArray)
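    // wrap = true: pixels beyond the canvas edge are read from the opposite side, keeping the padded view tileable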
    val layer = new ImgViewLayer(dims(0) + padding, dims(1) + padding, true)
    layer.setOffsetX(-padding / 2)
    layer.setOffsetY(-padding / 2)
    viewLayer.add(layer).freeRef()
    viewLayer
  }
  // Execute the main process while registered with the site index
  val registration = registerWithIndexJPG(() => tiledCanvas)
  try {
    // Display a pre-tiled image inside the report itself
    withMonitoredJpg(() => {
      val tiledCanvas1 = tiledCanvas
      val toImage = tiledCanvas1.toImage
      tiledCanvas1.freeRef()
      toImage
    }) {
      // Display an additional, non-tiled image of the canvas
      withMonitoredJpg(() => Option(rotatedCanvas).map(tensor => {
        val image = tensor.toRgbImage
        tensor.freeRef()
        image
      }).orNull) {
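        // Run the painting itself in a "Painting" sub-report so its output is grouped separately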
        log.subreport("Painting", (sub: NotebookOutput) => {
          paint(initUrl, initUrl, canvas.addRef(), new VisualStyleNetwork(
            styleLayers = List(
              // Mostly lower-level VGG19 layers, plus a few deeper ones, for a good balance between speed and accuracy
              VGG19.VGG19_0a,
              VGG19.VGG19_0b,
              VGG19.VGG19_1a,
              VGG19.VGG19_1b1,
              VGG19.VGG19_1b2,
              VGG19.VGG19_1c1,
              VGG19.VGG19_1c2,
              VGG19.VGG19_1c3,
              VGG19.VGG19_1e1,
              VGG19.VGG19_1e2,
              VGG19.VGG19_1e3
            ),
            styleModifiers = List(
              // These two operators are a good combination for a vivid yet accurate style
              {
                new GramMatrixEnhancer()
                  .setMinMax(-5, 5)
                //.scale(0.5)
              },
              {
                new MomentMatcher()
              }
            ),
            styleUrl = List(styleUrl),
            magnification = magnification,
            viewLayer = viewLayer
          ), new BasicOptimizer {
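            // Optimizer limits: up to 30 minutes or 10 iterations, an effectively uncapped step rate, and no trust region constraint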
            override val trainingMinutes: Int = 30
            override val trainingIterations: Int = 10
            override val maxRate = 1e9
            override def trustRegion(layer: Layer): TrustRegion = null
            override def renderingNetwork(dims: Seq[Int]) = getKaleidoscope(dims.toArray)
          }, new GeometricSequence {
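            // Resolutions advance geometrically from minResolution to maxResolution over `steps` values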
            override val min: Double = minResolution
            override val max: Double = maxResolution
            override val steps = TextureTiledRotor.this.steps
          }.toStream.map(_.round.toDouble): _*)(sub)
          null
        })
        uploadAsync(log)
      }(log)
    }
    null
  } finally {
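    // Always unregister from the site index, even if painting fails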
    registration.foreach(_.stop()(s3client, ec2client))
  }
}